// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <linux/acpi.h>
#include <linux/elf-randomize.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/tlbflush.h>
#include <asm/switch_to.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>
#include <asm/io_bitmap.h>
#include <asm/proto.h>
#include <asm/frame.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
	.x86_tss = {
		/*
		 * .sp0 is only used when entering ring 0 from a lower
		 * privilege level. Since the init task never runs anything
		 * but ring 0 code, there is no need for a valid value here.
		 */
		.sp0 = (1UL << (BITS_PER_LONG - 1)) + 1,

		/*
		 * .sp1 is cpu_current_top_of_stack. The init task never
		 * runs user code, but cpu_current_top_of_stack should still
		 * be well defined before the first context switch.
		 */
		.sp1 = TOP_OF_INIT_STACK,

		.io_bitmap_base	= IO_BITMAP_OFFSET_INVALID,
	},
};
EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);

DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);

	dst->thread.vm86 = NULL;

	return fpu__copy(dst, src);
}

/*
 * Free thread data structures etc..
 */
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;
	struct fpu *fpu = &t->fpu;

	if (test_thread_flag(TIF_IO_BITMAP))
		io_bitmap_exit(tsk);

	fpu__drop(fpu);
}

static int set_new_tls(struct task_struct *p, unsigned long tls)
{
	struct user_desc __user *utls = (struct user_desc __user *)tls;

	if (in_ia32_syscall())
		return do_set_thread_area(p, -1, utls, 0);
	else
		return do_set_thread_area_64(p, ARCH_SET_FS, tls);
}

int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	struct inactive_task_frame *frame;
	struct fork_frame *fork_frame;
	struct pt_regs *childregs;
	int ret = 0;

	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;

	frame->bp = encode_frame_pointer(childregs);
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap = NULL;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

#ifdef CONFIG_X86_64
	current_save_fsgs();
	p->thread.fsindex = current->thread.fsindex;
	p->thread.fsbase = current->thread.fsbase;
	p->thread.gsindex = current->thread.gsindex;
	p->thread.gsbase = current->thread.gsbase;

	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
#else
	p->thread.sp0 = (unsigned long) (childregs + 1);
	/*
	 * Clear all status flags including IF and set fixed bit. 64bit
	 * does not have this initialization as the frame does not contain
	 * flags. The flags consistency (especially vs. AC) is there
	 * ensured via objtool, which lacks 32bit support.
	 */
	frame->flags = X86_EFLAGS_FIXED;
#endif

	/* Kernel thread ? */
	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(childregs, 0, sizeof(struct pt_regs));
		kthread_frame_init(frame, sp, arg);
		return 0;
	}

	*childregs = *current_pt_regs();

#ifdef CONFIG_X86_32
	task_user_gs(p) = get_user_gs(current_pt_regs());
#endif

	/* Set a new TLS for the child thread? */
	if (clone_flags & CLONE_SETTLS)
		ret = set_new_tls(p, tls);

	if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
		io_bitmap_share(p);

	return ret;
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__clear_all(&tsk->thread.fpu);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_set_bits(X86_CR4_TSD);
	preempt_enable();
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_clear_bits(X86_CR4_TSD);
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

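/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * get_tsc_mode()/set_tsc_mode() above are reached from user space via the
 * generic prctl(2) syscall with PR_GET_TSC/PR_SET_TSC. The constants come
 * from <linux/prctl.h>; the program below is a hedged, minimal user-space
 * example, not kernel code:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int mode = 0;
 *
 *		prctl(PR_GET_TSC, &mode);		// ends up in get_tsc_mode()
 *		printf("TSC mode: %d\n", mode);		// PR_TSC_ENABLE or PR_TSC_SIGSEGV
 *
 *		prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	// set_tsc_mode(): RDTSC now raises SIGSEGV
 *		prctl(PR_SET_TSC, PR_TSC_ENABLE);	// re-enable RDTSC for this task
 *		return 0;
 *	}
 */
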
DEFINE_PER_CPU(u64, msr_misc_features_shadow);

static void set_cpuid_faulting(bool on)
{
	u64 msrval;

	msrval = this_cpu_read(msr_misc_features_shadow);
	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
	this_cpu_write(msr_misc_features_shadow, msrval);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
}

static void disable_cpuid(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(true);
	}
	preempt_enable();
}

static void enable_cpuid(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(false);
	}
	preempt_enable();
}

static int get_cpuid_mode(void)
{
	return !test_thread_flag(TIF_NOCPUID);
}

static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
{
	if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT))
		return -ENODEV;

	if (cpuid_enabled)
		enable_cpuid();
	else
		disable_cpuid();

	return 0;
}

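/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * CPUID faulting is driven from user space through arch_prctl(2) with
 * ARCH_GET_CPUID/ARCH_SET_CPUID (dispatched by do_arch_prctl_common() at
 * the end of this file), which land in get_cpuid_mode()/set_cpuid_mode().
 * A hedged user-space example, assuming the glibc syscall(2) wrapper since
 * there is no dedicated libc wrapper for arch_prctl:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		// 0 = CPUID now traps with SIGSEGV, 1 = CPUID executes normally
 *		syscall(SYS_arch_prctl, ARCH_SET_CPUID, 0);
 *
 *		// Returns the current mode (1 if CPUID is allowed)
 *		return syscall(SYS_arch_prctl, ARCH_GET_CPUID, 0);
 *	}
 */
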
/*
 * Called immediately after a successful exec.
 */
void arch_setup_new_exec(void)
{
	/* If cpuid was previously disabled for this task, re-enable it. */
	if (test_thread_flag(TIF_NOCPUID))
		enable_cpuid();

	/*
	 * Don't inherit TIF_SSBD across exec boundary when
	 * PR_SPEC_DISABLE_NOEXEC is used.
	 */
	if (test_thread_flag(TIF_SSBD) &&
	    task_spec_ssb_noexec(current)) {
		clear_thread_flag(TIF_SSBD);
		task_clear_spec_ssb_disable(current);
		task_clear_spec_ssb_noexec(current);
		speculation_ctrl_update(task_thread_info(current)->flags);
	}
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void switch_to_bitmap(unsigned long tifp)
{
	/*
	 * Invalidate I/O bitmap if the previous task used it. This prevents
	 * any possible leakage of an active I/O bitmap.
	 *
	 * If the next task has an I/O bitmap it will handle it on exit to
	 * user mode.
	 */
	if (tifp & _TIF_IO_BITMAP)
		tss_invalidate_io_bitmap();
}

static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
{
	/*
	 * Copy at least the byte range of the incoming task's bitmap which
	 * covers the permitted I/O ports.
	 *
	 * If the previous task which used an I/O bitmap had more bits
	 * permitted, then the copy needs to cover those as well so they
	 * get turned off.
	 */
	memcpy(tss->io_bitmap.bitmap, iobm->bitmap,
	       max(tss->io_bitmap.prev_max, iobm->max));

	/*
	 * Store the new max and the sequence number of this bitmap
	 * and a pointer to the bitmap itself.
	 */
	tss->io_bitmap.prev_max = iobm->max;
	tss->io_bitmap.prev_sequence = iobm->sequence;
}

/**
 * tss_update_io_bitmap - Update I/O bitmap before exiting to usermode
 */
void native_tss_update_io_bitmap(void)
{
	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
	struct thread_struct *t = &current->thread;
	u16 *base = &tss->x86_tss.io_bitmap_base;

	if (!test_thread_flag(TIF_IO_BITMAP)) {
		native_tss_invalidate_io_bitmap();
		return;
	}

	if (IS_ENABLED(CONFIG_X86_IOPL_IOPERM) && t->iopl_emul == 3) {
		*base = IO_BITMAP_OFFSET_VALID_ALL;
	} else {
		struct io_bitmap *iobm = t->io_bitmap;

		/*
		 * Only copy bitmap data when the sequence number differs. The
		 * update time is accounted to the incoming task.
		 */
		if (tss->io_bitmap.prev_sequence != iobm->sequence)
			tss_copy_io_bitmap(tss, iobm);

		/* Enable the bitmap */
		*base = IO_BITMAP_OFFSET_VALID_MAP;
	}

	/*
	 * Make sure that the TSS limit is covering the IO bitmap. It might have
	 * been cut down by a VMEXIT to 0x67 which would cause a subsequent I/O
	 * access from user space to trigger a #GP because the bitmap is outside
	 * the TSS limit.
	 */
	refresh_tss_limit();
}

#else /* CONFIG_X86_IOPL_IOPERM */
static inline void switch_to_bitmap(unsigned long tifp) { }
#endif

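/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * the per-CPU bitmap handling above backs the ioperm(2)/iopl(2) emulation.
 * A task that was granted port access has TIF_IO_BITMAP set and its bitmap
 * is copied into the TSS lazily on the next exit to user mode. A hedged
 * user-space example of what creates such a bitmap (needs CAP_SYS_RAWIO):
 *
 *	#include <sys/io.h>
 *
 *	int main(void)
 *	{
 *		// Allow unprivileged IN/OUT on ports 0x378..0x37a (legacy
 *		// parallel port); the kernel allocates this task's
 *		// struct io_bitmap and sets TIF_IO_BITMAP.
 *		if (ioperm(0x378, 3, 1))
 *			return 1;
 *
 *		outb(0xff, 0x378);	// now permitted by the TSS I/O bitmap
 *		ioperm(0x378, 3, 0);	// drop the permission again
 *		return 0;
 *	}
 */
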
#ifdef CONFIG_SMP

struct ssb_state {
	struct ssb_state	*shared_state;
	raw_spinlock_t		lock;
	unsigned int		disable_state;
	unsigned long		local_state;
};

#define LSTATE_SSB	0

static DEFINE_PER_CPU(struct ssb_state, ssb_state);

void speculative_store_bypass_ht_init(void)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	st->local_state = 0;

	/*
	 * Shared state setup happens once on the first bringup
	 * of the CPU. It's not destroyed on CPU hotunplug.
	 */
	if (st->shared_state)
		return;

	raw_spin_lock_init(&st->lock);

	/*
	 * Go over HT siblings and check whether one of them has set up the
	 * shared state pointer already.
	 */
	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
		if (cpu == this_cpu)
			continue;

		if (!per_cpu(ssb_state, cpu).shared_state)
			continue;

		/* Link it to the state of the sibling: */
		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
		return;
	}

	/*
	 * First HT sibling to come up on the core. Link shared state of
	 * the first HT sibling to itself. The siblings on the same core
	 * which come up later will see the shared state pointer and link
	 * themselves to the state of this CPU.
	 */
	st->shared_state = st;
}

/*
 * Logic is: The first HT sibling enables SSBD for both siblings in the core,
 * and the last sibling to disable it disables it for the whole core. This is
 * how MSR_SPEC_CTRL works in "hardware":
 *
 *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
 */
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	u64 msr = x86_amd_ls_cfg_base;

	if (!static_cpu_has(X86_FEATURE_ZEN)) {
		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
		wrmsrl(MSR_AMD64_LS_CFG, msr);
		return;
	}

	if (tifn & _TIF_SSBD) {
		/*
		 * Since this can race with prctl(), block reentry on the
		 * same CPU.
		 */
		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
			return;

		msr |= x86_amd_ls_cfg_ssbd_mask;

		raw_spin_lock(&st->shared_state->lock);
		/* First sibling enables SSBD: */
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		st->shared_state->disable_state++;
		raw_spin_unlock(&st->shared_state->lock);
	} else {
		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
			return;

		raw_spin_lock(&st->shared_state->lock);
		st->shared_state->disable_state--;
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		raw_spin_unlock(&st->shared_state->lock);
	}
}

#else /* !CONFIG_SMP */
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

	wrmsrl(MSR_AMD64_LS_CFG, msr);
}
#endif

static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
{
	/*
	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
	 * so ssbd_tif_to_spec_ctrl() just works.
	 */
	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}

/*
 * Update the MSRs managing speculation control, during context switch.
 *
 * tifp: Previous task's thread flags
 * tifn: Next task's thread flags
 */
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
						       unsigned long tifn)
{
	unsigned long tif_diff = tifp ^ tifn;
	u64 msr = x86_spec_ctrl_base;
	bool updmsr = false;

	lockdep_assert_irqs_disabled();

	/* Handle change of TIF_SSBD depending on the mitigation method. */
	if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_ssb_virt_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_core_ssb_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		updmsr |= !!(tif_diff & _TIF_SSBD);
		msr |= ssbd_tif_to_spec_ctrl(tifn);
	}

	/* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
	if (IS_ENABLED(CONFIG_SMP) &&
	    static_branch_unlikely(&switch_to_cond_stibp)) {
		updmsr |= !!(tif_diff & _TIF_SPEC_IB);
		msr |= stibp_tif_to_spec_ctrl(tifn);
	}

	if (updmsr)
		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}

static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
{
	if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
		if (task_spec_ssb_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SSBD);
		else
			clear_tsk_thread_flag(tsk, TIF_SSBD);

		if (task_spec_ib_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SPEC_IB);
		else
			clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
	}
	/* Return the updated threadinfo flags. */
	return task_thread_info(tsk)->flags;
}

void speculation_ctrl_update(unsigned long tif)
{
	unsigned long flags;

	/* Forced update. Make sure all relevant TIF flags are different. */
	local_irq_save(flags);
	__speculation_ctrl_update(~tif, tif);
	local_irq_restore(flags);
}

/* Called from seccomp/prctl update */
void speculation_ctrl_update_current(void)
{
	preempt_disable();
	speculation_ctrl_update(speculation_ctrl_update_tif(current));
	preempt_enable();
}

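/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * the TIF_SSBD/TIF_SPEC_IB updates above are normally triggered from user
 * space via prctl(2) with PR_SET_SPECULATION_CTRL (or via seccomp), which
 * eventually calls speculation_ctrl_update_current(). A hedged user-space
 * example using the constants from <linux/prctl.h>:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	int main(void)
 *	{
 *		// Query the current store-bypass mitigation state
 *		long state = prctl(PR_GET_SPECULATION_CTRL,
 *				   PR_SPEC_STORE_BYPASS, 0, 0, 0);
 *
 *		// Disable speculative store bypass for this task;
 *		// this sets TIF_SSBD and updates the SSBD MSR state.
 *		prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *		      PR_SPEC_DISABLE, 0, 0);
 *
 *		return state < 0;
 *	}
 */
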
static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
{
	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);

	newval = cr4 ^ mask;
	if (newval != cr4) {
		this_cpu_write(cpu_tlbstate.cr4, newval);
		__write_cr4(newval);
	}
}

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
	unsigned long tifp, tifn;

	tifn = READ_ONCE(task_thread_info(next_p)->flags);
	tifp = READ_ONCE(task_thread_info(prev_p)->flags);

	switch_to_bitmap(tifp);

	propagate_user_return_notify(prev_p, next_p);

	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
	    arch_has_block_step()) {
		unsigned long debugctl, msk;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl &= ~DEBUGCTLMSR_BTF;
		msk = tifn & _TIF_BLOCKSTEP;
		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	if ((tifp ^ tifn) & _TIF_NOTSC)
		cr4_toggle_bits_irqsoff(X86_CR4_TSD);

	if ((tifp ^ tifn) & _TIF_NOCPUID)
		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));

	if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
		__speculation_ctrl_update(tifp, tifn);
	} else {
		speculation_ctrl_update_tif(prev_p);
		tifn = speculation_ctrl_update_tif(next_p);

		/* Enforce MSR update to ensure consistent state */
		__speculation_ctrl_update(~tifn, tifn);
	}

	if ((tifp ^ tifn) & _TIF_SLD)
		switch_to_sld(next_p);
}

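/*
 * Worked example (added for clarity, not in the original file) of the flag
 * diffing used above: XOR-ing the previous and next flag words yields a word
 * whose set bits are exactly the flags that changed, so the expensive CR4 and
 * MSR writes are only done on an actual transition.
 *
 *	prev (tifp):	... 0 1 0 ...	(_TIF_NOTSC set)
 *	next (tifn):	... 0 0 0 ...	(_TIF_NOTSC clear)
 *	tifp ^ tifn:	... 0 1 0 ...	-> _TIF_NOTSC differs, CR4.TSD is toggled
 *
 * Likewise, (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT simply moves the
 * isolated TIF_BLOCKSTEP bit of the next task into the BTF bit position of
 * MSR_IA32_DEBUGCTLMSR.
 */
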
/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

void arch_cpu_idle_enter(void)
{
	tsc_verify_tsc_adjust(false);
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}

/*
 * We use this if we don't have any better idle routine..
 */
void __cpuidle default_idle(void)
{
	raw_safe_halt();
}

#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
#endif

bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}

void stop_this_cpu(void *dummy)
{
	local_irq_disable();

	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

	/*
	 * Use wbinvd on processors that support SME. This provides support
	 * for performing a successful kexec when going from SME inactive
	 * to SME active (or vice-versa). The cache must be cleared so that
	 * if there are entries with the same physical address, both with and
	 * without the encryption bit, they don't race each other when flushed
	 * and potentially end up with the wrong entry being committed to
	 * memory.
	 */
	if (boot_cpu_has(X86_FEATURE_SME))
		native_wbinvd();

	for (;;) {
		/*
		 * Use native_halt() so that memory contents don't change
		 * (stack usage and variables) after possibly issuing the
		 * native_wbinvd() above.
		 */
		native_halt();
	}
}

/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 *
 * XXX this function is completely buggered vs RCU and tracing.
 */
static void amd_e400_idle(void)
{
	/*
	 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
	 * gets set after static_cpu_has() places have been converted via
	 * alternatives.
	 */
	if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		default_idle();
		return;
	}

	tick_broadcast_enter();

	default_idle();

	/*
	 * The switch back from broadcast mode needs to be called with
	 * interrupts disabled.
	 */
	raw_local_irq_disable();
	tick_broadcast_exit();
	raw_local_irq_enable();
}

/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT) || boot_cpu_has_bug(X86_BUG_MONITOR))
		return 0;

	return 1;
}

/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			raw_local_irq_enable();
	} else {
		raw_local_irq_enable();
	}
	__current_clr_polling();
}

void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else {
		x86_idle = default_idle;
	}
}

void amd_e400_c1e_apic_setup(void)
{
	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
		local_irq_disable();
		tick_broadcast_force();
		local_irq_enable();
	}
}

void __init arch_post_acpi_subsys_init(void)
{
	u32 lo, hi;

	if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
		return;

	/*
	 * AMD E400 detection needs to happen after ACPI has been enabled. If
	 * the machine is affected, K8_INTP_C1E_ACTIVE_MASK bits are set in
	 * MSR_K8_INT_PENDING_MSG.
	 */
	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
	if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
		return;

	boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);

	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
		mark_tsc_unstable("TSC halt in AMD C1E");
	pr_info("System has AMD C1E enabled\n");
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else {
		return -1;
	}

	return 0;
}
early_param("idle", idle_setup);

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	return randomize_page(mm->brk, 0x02000000);
}

/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
	int count = 0;

	if (p == current || p->state == TASK_RUNNING)
		return 0;

	if (!try_get_task_stack(p))
		return 0;

	start = (unsigned long)task_stack_page(p);
	if (!start)
		goto out;

	/*
	 * Layout of the stack page:
	 *
	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
	 * PADDING
	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
	 * stack
	 * ----------- bottom = start
	 *
	 * The task's stack pointer points at the location where the
	 * framepointer is stored. The data on the stack is:
	 * ... IP FP ... IP FP
	 *
	 * We need to read FP and IP, so we need to adjust the upper
	 * bound by another unsigned long.
	 */
	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	top -= 2 * sizeof(unsigned long);
	bottom = start;

	sp = READ_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		goto out;

	fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
	do {
		if (fp < bottom || fp > top)
			goto out;
		ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
		if (!in_sched_functions(ip)) {
			ret = ip;
			goto out;
		}
		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
	} while (count++ < 16 && p->state != TASK_RUNNING);

out:
	put_task_stack(p);
	return ret;
}

long do_arch_prctl_common(struct task_struct *task, int option,
			  unsigned long cpuid_enabled)
{
	switch (option) {
	case ARCH_GET_CPUID:
		return get_cpuid_mode();
	case ARCH_SET_CPUID:
		return set_cpuid_mode(task, cpuid_enabled);
	}

	return -EINVAL;
}