/*
 * Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/syscalls.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>
extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif
/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
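
/*
 * Illustrative sketch of the intended calling pattern (hypothetical
 * helper, not a kernel API): ptrace-style code that reads a stopped
 * child's FP registers must flush the live CPU state into the
 * thread_struct first, or it may see stale values.
 */
static inline void example_snapshot_fpr(struct task_struct *child, void *buf)
{
	flush_fp_to_thread(child);	/* FP state now valid in child->thread.fpr */
	memcpy(buf, child->thread.fpr, sizeof(child->thread.fpr));
}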
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
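
/*
 * Illustrative sketch (hypothetical caller): kernel code that wants to
 * execute floating-point instructions must disable preemption and claim
 * the FPU first, because user FP state is switched lazily.
 */
static inline void example_kernel_fp_section(void)
{
	preempt_disable();	/* enable_kernel_fp() WARNs if preemptible */
	enable_kernel_fp();
	/* ... FP-using code goes here ... */
	preempt_enable();
}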
#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);
/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif
void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);
void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */
#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif /* CONFIG_SPE */
	preempt_enable();
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_dabr(struct pt_regs *regs, unsigned long address,
	     unsigned long error_code)
{
	siginfo_t info;

	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_dabr_match(regs))
		return;

	/* Clear the DABR */
	set_dabr(0);

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
static DEFINE_PER_CPU(unsigned long, current_dabr);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->iac3 = thread->iac4 = 0;
#endif
	thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->dvc1 = thread->dvc2 = 0;
#endif
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->dbcr1 = 0;
#endif
}
static void prime_debug_regs(struct thread_struct *thread)
{
	mtspr(SPRN_IAC1, thread->iac1);
	mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, thread->iac3);
	mtspr(SPRN_IAC4, thread->iac4);
#endif
	mtspr(SPRN_DAC1, thread->dac1);
	mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, thread->dvc1);
	mtspr(SPRN_DVC2, thread->dvc2);
#endif
	mtspr(SPRN_DBCR0, thread->dbcr0);
	mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}
/*
 * If either the old or the new thread is making use of the debug
 * registers, set the debug registers from the values stored in the
 * new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
	if ((current->thread.dbcr0 & DBCR0_IDM)
		|| (new_thread->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_thread);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	if (thread->dabr) {
		thread->dabr = 0;
		set_dabr(0);
	}
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
int set_dabr(unsigned long dabr)
{
	__get_cpu_var(current_dabr) = dabr;

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr);

	/* XXX should we have a CPU_FTR_HAS_DABR ? */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
#elif defined(CONFIG_PPC_BOOK3S)
	mtspr(SPRN_DABR, dabr);
#endif

	return 0;
}
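
/*
 * Illustrative sketch (hypothetical, for exposition): arming a data
 * watchpoint on a kernel variable. The DABR low bits select translation
 * mode and read/write matching; the flag names below are assumed from
 * the ptrace-side definitions and are not used in this file.
 *
 *	set_dabr((unsigned long)&watched_var |
 *		 DABR_TRANSLATION | DABR_DATA_WRITE);
 *	...
 *	set_dabr(0);	// one-shot style: disarm after the match fires
 */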
#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif
#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */
#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR updates for us, so we don't need to set it here.
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
		set_dabr(new->thread.dabr);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
	new_thread = &new->thread;
	old_thread = &current->thread;
#if defined(CONFIG_PPC_BOOK3E_64)
	/* XXX Current Book3E code doesn't deal with kernel side DBCR0,
	 * we always hold the user values, so we set it now.
	 *
	 * However, we ensure the kernel MSR:DE is appropriately cleared too
	 * to avoid spurious single step exceptions in the kernel.
	 *
	 * This will have to change to merge with the ppc32 code at some point,
	 * but I don't like much what ppc32 is doing today so there's some
	 * thinking needed there
	 */
	if ((new_thread->dbcr0 | old_thread->dbcr0) & DBCR0_IDM) {
		u32 dbcr0;

		mtmsr(mfmsr() & ~MSR_DE);
		isync();
		dbcr0 = mfspr(SPRN_DBCR0);
		dbcr0 = (dbcr0 & DBCR0_EDM) | new_thread->dbcr0;
		mtspr(SPRN_DBCR0, dbcr0);
	}
#endif /* CONFIG_PPC64_BOOK3E */
#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		unsigned long start_tb, current_tb;

		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC_BOOK3S_64
	batch = &__get_cpu_var(ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */
	local_irq_save(flags);

	account_system_vtime(current);
	account_process_vtime(current);

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync. Hard disable here.
	 */
	hard_irq_disable();
	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = &__get_cpu_var(ppc64_tlb_batch);
		batch->active = 1;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	local_irq_restore(flags);

	return last;
}
static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		     __get_user(instr, (unsigned int __user *)pc)) {
			printk("XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk("<%08x> ", instr);
			else
				printk("%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}
static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{0,		NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}
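
/*
 * Illustrative sketch (hypothetical table, same pattern as msr_bits):
 * printbits() works for any zero-terminated regbit table, e.g. decoding
 * a DBCR0 image.
 *
 *	static struct regbit dbcr0_bits[] = {
 *		{DBCR0_EDM,	"EDM"},
 *		{DBCR0_IDM,	"IDM"},
 *		{0,		NULL}
 *	};
 *	printbits(thread->dbcr0, dbcr0_bits);
 */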
#ifdef CONFIG_PPC64
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif
void show_regs(struct pt_regs * regs)
{
	int i, trap;

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if (trap == 0x300 || trap == 0x600)
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif
	printk("TASK = %p[%d] '%s' THREAD: %p",
	       current, task_pid_nr(current), current->comm,
	       task_thread_info(current));

#ifdef CONFIG_SMP
	printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0;  i < 32;  i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}
void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
	discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}
void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	flush_spe_to_thread(current);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(tsk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long unused, struct task_struct *p,
		struct pt_regs *regs)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	CHECK_FULL_REGS(regs);
	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	*childregs = *regs;
	if ((childregs->msr & MSR_PR) == 0) {
		/* for kernel thread, set `current' and stackptr in new task */
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
#ifdef CONFIG_PPC32
		childregs->gpr[2] = (unsigned long) p;
#else
		clear_tsk_thread_flag(p, TIF_32BIT);
#endif
		p->thread.regs = NULL;	/* no user register state */
	} else {
		childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}
	}
	childregs->gpr[3] = 0;  /* Result from fork() */
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC_STD_MMU_64
	if (mmu_has_feature(MMU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		if (current->thread.dscr_inherit) {
			p->thread.dscr_inherit = 1;
			p->thread.dscr = current->thread.dscr;
		} else if (0 != dscr_default) {
			p->thread.dscr_inherit = 1;
			p->thread.dscr = dscr_default;
		} else {
			p->thread.dscr_inherit = 0;
			p->thread.dscr = 0;
		}
	}
#endif

	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The function (ret_from_except) is actually a pointer
	 * to the TOC entry.  The first entry is a pointer to the actual
	 * function.
	 */
#ifdef CONFIG_PPC64
	kregs->nip = *((unsigned long *)ret_from_fork);
#else
	kregs->nip = (unsigned long)ret_from_fork;
#endif

	return 0;
}
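
/*
 * Illustrative sketch (hypothetical struct, for exposition of the ABI
 * note above): a ppc64 (ELFv1) function descriptor is three doublewords,
 * so dereferencing a "function pointer" yields the real entry point.
 */
#ifdef CONFIG_PPC64
struct example_func_desc {
	unsigned long entry;	/* address of the actual code */
	unsigned long toc;	/* TOC base the code expects in r2 */
	unsigned long env;	/* environment pointer (unused by C) */
};
/* kregs->nip = ((struct example_func_desc *)ret_from_fork)->entry; */
#endif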
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry, toc;

		/* start is a relocated pointer to the function descriptor for
		 * the elf _start routine.  The first entry in the function
		 * descriptor is the entry address of _start and the second
		 * entry is the TOC value we need to use.
		 */
		__get_user(entry, (unsigned long __user *)start);
		__get_user(toc, (unsigned long __user *)start+1);

		/* Check whether the e_entry function descriptor entries
		 * need to be relocated before we can use them.
		 */
		if (load_addr != 0) {
			entry += load_addr;
			toc   += load_addr;
		}
		regs->nip = entry;
		regs->gpr[2] = toc;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif

	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}
int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}
int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}
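
/*
 * Illustrative sketch (hypothetical userspace caller): set_endian() and
 * get_endian() back prctl(PR_SET_ENDIAN) / prctl(PR_GET_ENDIAN):
 *
 *	#include <sys/prctl.h>
 *
 *	int cur;
 *	prctl(PR_GET_ENDIAN, (unsigned long)&cur);
 *	prctl(PR_SET_ENDIAN, PR_ENDIAN_BIG);
 */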
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
#define TRUNC_PTR(x)	((typeof(x))(((unsigned long)(x)) & 0xffffffff))

int sys_clone(unsigned long clone_flags, unsigned long usp,
	      int __user *parent_tidp, void __user *child_threadptr,
	      int __user *child_tidp, int p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	if (usp == 0)
		usp = regs->gpr[1];	/* stack pointer for child */
#ifdef CONFIG_PPC64
	if (is_32bit_task()) {
		parent_tidp = TRUNC_PTR(parent_tidp);
		child_tidp = TRUNC_PTR(child_tidp);
	}
#endif
	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}
int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
	     unsigned long p4, unsigned long p5, unsigned long p6,
	     struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
	      unsigned long p4, unsigned long p5, unsigned long p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
			regs, 0, NULL, NULL);
}
int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
	       unsigned long a3, unsigned long a4, unsigned long a5,
	       struct pt_regs *regs)
{
	int error;
	char *filename;

	filename = getname((const char __user *) a0);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_spe_to_thread(current);
	error = do_execve(filename,
			  (const char __user *const __user *) a1,
			  (const char __user *const __user *) a2, regs);
	putname(filename);
out:
	return error;
}
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
	unsigned long mrth = -1;
#ifdef CONFIG_PPC64
	extern void mod_return_to_handler(void);
	rth = *(unsigned long *)rth;
	mrth = (unsigned long)mod_return_to_handler;
	mrth = *(unsigned long *)mrth;
#endif
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- Exception: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}
void dump_stack(void)
{
	show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);
#ifdef CONFIG_PPC64
void ppc64_runlatch_on(void)
{
	unsigned long ctrl;

	if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) {
		HMT_medium();

		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);

		set_thread_flag(TIF_RUNLATCH);
	}
}

void __ppc64_runlatch_off(void)
{
	unsigned long ctrl;

	HMT_medium();

	clear_thread_flag(TIF_RUNLATCH);

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */
#if THREAD_SHIFT < PAGE_SHIFT

static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}

#endif /* THREAD_SHIFT < PAGE_SHIFT */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}
static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
	else
		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}
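
/*
 * Worked example of the ranges above, assuming 4K pages (PAGE_SHIFT == 12):
 * 32-bit:  rnd < 1 << (23 - 12) = 2048 pages;   2048 << 12 = 8MB.
 * 64-bit:  rnd < 1 << (30 - 12) = 262144 pages; 262144 << 12 = 1GB.
 * Randomisation is done in whole pages, so brk stays page-aligned.
 */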
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment. Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
	unsigned long ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < base)
		return base;

	return ret;
}