/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/cpuidle.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
asmlinkage extern void ret_from_fork(void);

DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);
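
/*
 * old_rsp caches the user-space stack pointer while a task runs in the
 * kernel, and is_idle marks whether this CPU is currently in the idle
 * loop.  Drivers can hook the chain below to be notified of IDLE_START
 * and IDLE_END transitions.
 */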
void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
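
/*
 * Called by the idle loop, with interrupts already disabled, right before
 * the CPU goes to sleep: mark this CPU idle and fire IDLE_START.
 */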
void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}
static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}
/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us.  CPU0 already has it initialized but no harm in
	 * doing it again.  This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();
		while (!need_resched()) {
			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();

			/* enter_idle() needs rcu for notifiers */
			rcu_idle_enter();
			if (cpuidle_idle_call())
				pm_idle();
			rcu_idle_exit();

			start_critical_timings();

			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}
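
/*
 * Called when the last reference to a dead task is dropped; by this point
 * the exiting process must no longer own a private LDT.
 */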
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}
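
/*
 * Helpers for FS/GS bases that fit in 32 bits: do_arch_prctl() below
 * installs such a base in the thread's TLS slot in the GDT rather than in
 * the FS/GS base MSRs, because reloading a segment selector is cheaper on
 * context switch than a wrmsr.
 */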
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}
static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
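
/*
 * Set up the child's kernel stack: a copy of the parent's user register
 * frame is placed at the very top of the child's stack pages, and
 * thread.sp/sp0 are pointed at it so the new task can return to user
 * mode through ret_from_fork.
 */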
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	if (user_mode(regs))
		childregs->sp = sp;
	else
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
	savesegment(fs, p->thread.fsindex);
	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	err = 0;
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
	}

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}
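
/*
 * Reset the register and segment state for a freshly exec'd image: flat
 * data segments, instruction/stack pointers taken from the new binary,
 * and interrupts enabled via EFLAGS.IF.
 */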
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	regs->ip	= new_ip;
	regs->sp	= new_sp;
	percpu_write(old_rsp, new_sp);
	regs->cs	= _cs;
	regs->ss	= _ss;
	regs->flags	= X86_EFLAGS_IF;
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER32_CS, __USER32_DS, __USER32_DS);
}
#endif
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported either.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;
	fpu_switch_t fpu;

	fpu = switch_fpu_prepare(prev_p, next_p, cpu);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
	 * reload when it has changed. When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	switch_fpu_finish(next_p, fpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = percpu_read(old_rsp);
	percpu_write(old_rsp, next->usersp);
	percpu_write(current_task, next_p);

	percpu_write(kernel_stack,
		  (unsigned long)task_stack_page(next_p) +
		  THREAD_SIZE - KERNEL_STACK_OFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	return prev_p;
}
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32-bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}
void set_personality_ia32(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_IA32);
	current->personality |= force_personality32;

	/* Mark the associated mm as containing 32-bit tasks. */
	if (current->mm)
		current->mm->context.ia32_compat = 1;

	/* Prepare the first "return" to user space */
	current_thread_info()->status |= TS_COMPAT;
}
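
/*
 * get_wchan(): walk the sleeping task's frame-pointer chain (bounded to
 * 16 frames and to the task's own stack) until a return address outside
 * the scheduler is found; that address is where the task is waiting.
 */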
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}
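
/*
 * Back end for the arch_prctl(2) ARCH_SET_FS/ARCH_SET_GS and
 * ARCH_GET_FS/ARCH_GET_GS requests: set or read the FS/GS base of @task,
 * either through a GDT TLS entry (bases below 4GB) or through the
 * FS_BASE/KERNEL_GS_BASE MSRs.
 */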
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
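
/*
 * arch_prctl(2) entry point for the current task, e.g.
 *	arch_prctl(ARCH_SET_FS, (unsigned long)tls_base);
 * to point the FS base at a thread's TLS block.
 */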
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
unsigned long KSTK_ESP(struct task_struct *task)
{
	return (test_tsk_thread_flag(task, TIF_IA32)) ?
			(task_pt_regs(task)->sp) : ((task)->thread.usersp);
}