arch/powerpc/kernel/process.c
1 /*
2 * Derived from "arch/i386/kernel/process.c"
3 * Copyright (C) 1995 Linus Torvalds
5 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
6 * Paul Mackerras (paulus@cs.anu.edu.au)
8 * PowerPC version
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
17 #include <linux/errno.h>
18 #include <linux/sched.h>
19 #include <linux/sched/debug.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/kernel.h>
23 #include <linux/mm.h>
24 #include <linux/smp.h>
25 #include <linux/stddef.h>
26 #include <linux/unistd.h>
27 #include <linux/ptrace.h>
28 #include <linux/slab.h>
29 #include <linux/user.h>
30 #include <linux/elf.h>
31 #include <linux/prctl.h>
32 #include <linux/init_task.h>
33 #include <linux/export.h>
34 #include <linux/kallsyms.h>
35 #include <linux/mqueue.h>
36 #include <linux/hardirq.h>
37 #include <linux/utsname.h>
38 #include <linux/ftrace.h>
39 #include <linux/kernel_stat.h>
40 #include <linux/personality.h>
41 #include <linux/random.h>
42 #include <linux/hw_breakpoint.h>
43 #include <linux/uaccess.h>
44 #include <linux/elf-randomize.h>
46 #include <asm/pgtable.h>
47 #include <asm/io.h>
48 #include <asm/processor.h>
49 #include <asm/mmu.h>
50 #include <asm/prom.h>
51 #include <asm/machdep.h>
52 #include <asm/time.h>
53 #include <asm/runlatch.h>
54 #include <asm/syscalls.h>
55 #include <asm/switch_to.h>
56 #include <asm/tm.h>
57 #include <asm/debug.h>
58 #ifdef CONFIG_PPC64
59 #include <asm/firmware.h>
60 #endif
61 #include <asm/code-patching.h>
62 #include <asm/exec.h>
63 #include <asm/livepatch.h>
64 #include <asm/cpu_has_feature.h>
65 #include <asm/asm-prototypes.h>
67 #include <linux/kprobes.h>
68 #include <linux/kdebug.h>
70 /* Transactional Memory debug */
71 #ifdef TM_DEBUG_SW
72 #define TM_DEBUG(x...) printk(KERN_INFO x)
73 #else
74 #define TM_DEBUG(x...) do { } while(0)
75 #endif
77 extern unsigned long _get_SP(void);
79 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
80 static void check_if_tm_restore_required(struct task_struct *tsk)
83 * If we are saving the current thread's registers, and the
84 * thread is in a transactional state, set the TIF_RESTORE_TM
85 * bit so that we know to restore the registers before
86 * returning to userspace.
88 if (tsk == current && tsk->thread.regs &&
89 MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
90 !test_thread_flag(TIF_RESTORE_TM)) {
91 tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
92 set_thread_flag(TIF_RESTORE_TM);
96 static inline bool msr_tm_active(unsigned long msr)
98 return MSR_TM_ACTIVE(msr);
100 #else
101 static inline bool msr_tm_active(unsigned long msr) { return false; }
102 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
103 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
105 bool strict_msr_control;
106 EXPORT_SYMBOL(strict_msr_control);
108 static int __init enable_strict_msr_control(char *str)
110 strict_msr_control = true;
111 pr_info("Enabling strict facility control\n");
113 return 0;
115 early_param("ppc_strict_facility_enable", enable_strict_msr_control);
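/*
 * msr_check_and_set() ORs the requested facility bits into the MSR
 * (also forcing MSR_VSX on when MSR_FP is requested on a VSX-capable
 * CPU) and returns the resulting value.  The TM bits are untouched,
 * so callers such as enable_kernel_fp() can use the return value to
 * tell whether a transaction was active when the facility was enabled.
 */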
117 unsigned long msr_check_and_set(unsigned long bits)
119 unsigned long oldmsr = mfmsr();
120 unsigned long newmsr;
122 newmsr = oldmsr | bits;
124 #ifdef CONFIG_VSX
125 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
126 newmsr |= MSR_VSX;
127 #endif
129 if (oldmsr != newmsr)
130 mtmsr_isync(newmsr);
132 return newmsr;
135 void __msr_check_and_clear(unsigned long bits)
137 unsigned long oldmsr = mfmsr();
138 unsigned long newmsr;
140 newmsr = oldmsr & ~bits;
142 #ifdef CONFIG_VSX
143 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
144 newmsr &= ~MSR_VSX;
145 #endif
147 if (oldmsr != newmsr)
148 mtmsr_isync(newmsr);
150 EXPORT_SYMBOL(__msr_check_and_clear);
152 #ifdef CONFIG_PPC_FPU
153 void __giveup_fpu(struct task_struct *tsk)
155 unsigned long msr;
157 save_fpu(tsk);
158 msr = tsk->thread.regs->msr;
159 msr &= ~MSR_FP;
160 #ifdef CONFIG_VSX
161 if (cpu_has_feature(CPU_FTR_VSX))
162 msr &= ~MSR_VSX;
163 #endif
164 tsk->thread.regs->msr = msr;
167 void giveup_fpu(struct task_struct *tsk)
169 check_if_tm_restore_required(tsk);
171 msr_check_and_set(MSR_FP);
172 __giveup_fpu(tsk);
173 msr_check_and_clear(MSR_FP);
175 EXPORT_SYMBOL(giveup_fpu);
178 * Make sure the floating-point register state in the
179 * thread_struct is up to date for task tsk.
181 void flush_fp_to_thread(struct task_struct *tsk)
183 if (tsk->thread.regs) {
185 * We need to disable preemption here because if we didn't,
186 * another process could get scheduled after the regs->msr
187 * test but before we have finished saving the FP registers
188 * to the thread_struct. That process could take over the
189 * FPU, and then when we get scheduled again we would store
190 * bogus values for the remaining FP registers.
192 preempt_disable();
193 if (tsk->thread.regs->msr & MSR_FP) {
195 * This should only ever be called for current or
196 * for a stopped child process. Since we save away
197 * the FP register state on context switch,
198 * there is something wrong if a stopped child appears
199 * to still have its FP state in the CPU registers.
201 BUG_ON(tsk != current);
202 giveup_fpu(tsk);
204 preempt_enable();
207 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
209 void enable_kernel_fp(void)
211 unsigned long cpumsr;
213 WARN_ON(preemptible());
215 cpumsr = msr_check_and_set(MSR_FP);
217 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
218 check_if_tm_restore_required(current);
220 * If a thread has already been reclaimed then the
221 * checkpointed registers are on the CPU but have definitely
222 * been saved by the reclaim code. Don't need to and *cannot*
223 * giveup as this would save to the 'live' structure not the
224 * checkpointed structure.
226 if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
227 return;
228 __giveup_fpu(current);
231 EXPORT_SYMBOL(enable_kernel_fp);
233 static int restore_fp(struct task_struct *tsk)
235 if (tsk->thread.load_fp || msr_tm_active(tsk->thread.regs->msr)) {
236 load_fp_state(&current->thread.fp_state);
237 current->thread.load_fp++;
238 return 1;
240 return 0;
242 #else
243 static int restore_fp(struct task_struct *tsk) { return 0; }
244 #endif /* CONFIG_PPC_FPU */
246 #ifdef CONFIG_ALTIVEC
247 #define loadvec(thr) ((thr).load_vec)
249 static void __giveup_altivec(struct task_struct *tsk)
251 unsigned long msr;
253 save_altivec(tsk);
254 msr = tsk->thread.regs->msr;
255 msr &= ~MSR_VEC;
256 #ifdef CONFIG_VSX
257 if (cpu_has_feature(CPU_FTR_VSX))
258 msr &= ~MSR_VSX;
259 #endif
260 tsk->thread.regs->msr = msr;
263 void giveup_altivec(struct task_struct *tsk)
265 check_if_tm_restore_required(tsk);
267 msr_check_and_set(MSR_VEC);
268 __giveup_altivec(tsk);
269 msr_check_and_clear(MSR_VEC);
271 EXPORT_SYMBOL(giveup_altivec);
273 void enable_kernel_altivec(void)
275 unsigned long cpumsr;
277 WARN_ON(preemptible());
279 cpumsr = msr_check_and_set(MSR_VEC);
281 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
282 check_if_tm_restore_required(current);
284 * If a thread has already been reclaimed then the
285 * checkpointed registers are on the CPU but have definitely
286 * been saved by the reclaim code. Don't need to and *cannot*
287 * giveup as this would save to the 'live' structure not the
288 * checkpointed structure.
290 if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
291 return;
292 __giveup_altivec(current);
295 EXPORT_SYMBOL(enable_kernel_altivec);
298 * Make sure the VMX/Altivec register state in the
299 * thread_struct is up to date for task tsk.
301 void flush_altivec_to_thread(struct task_struct *tsk)
303 if (tsk->thread.regs) {
304 preempt_disable();
305 if (tsk->thread.regs->msr & MSR_VEC) {
306 BUG_ON(tsk != current);
307 giveup_altivec(tsk);
309 preempt_enable();
312 EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
314 static int restore_altivec(struct task_struct *tsk)
316 if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
317 (tsk->thread.load_vec || msr_tm_active(tsk->thread.regs->msr))) {
318 load_vr_state(&tsk->thread.vr_state);
319 tsk->thread.used_vr = 1;
320 tsk->thread.load_vec++;
322 return 1;
324 return 0;
326 #else
327 #define loadvec(thr) 0
328 static inline int restore_altivec(struct task_struct *tsk) { return 0; }
329 #endif /* CONFIG_ALTIVEC */
331 #ifdef CONFIG_VSX
332 static void __giveup_vsx(struct task_struct *tsk)
334 unsigned long msr = tsk->thread.regs->msr;
337 * We should never be setting MSR_VSX without also setting
338 * MSR_FP and MSR_VEC
340 WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
342 /* __giveup_fpu will clear MSR_VSX */
343 if (msr & MSR_FP)
344 __giveup_fpu(tsk);
345 if (msr & MSR_VEC)
346 __giveup_altivec(tsk);
349 static void giveup_vsx(struct task_struct *tsk)
351 check_if_tm_restore_required(tsk);
353 msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
354 __giveup_vsx(tsk);
355 msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
358 void enable_kernel_vsx(void)
360 unsigned long cpumsr;
362 WARN_ON(preemptible());
364 cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
366 if (current->thread.regs &&
367 (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
368 check_if_tm_restore_required(current);
370 * If a thread has already been reclaimed then the
371 * checkpointed registers are on the CPU but have definitely
372 * been saved by the reclaim code. Don't need to and *cannot*
373 * giveup as this would save to the 'live' structure not the
374 * checkpointed structure.
376 if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
377 return;
378 __giveup_vsx(current);
381 EXPORT_SYMBOL(enable_kernel_vsx);
383 void flush_vsx_to_thread(struct task_struct *tsk)
385 if (tsk->thread.regs) {
386 preempt_disable();
387 if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
388 BUG_ON(tsk != current);
389 giveup_vsx(tsk);
391 preempt_enable();
394 EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
396 static int restore_vsx(struct task_struct *tsk)
398 if (cpu_has_feature(CPU_FTR_VSX)) {
399 tsk->thread.used_vsr = 1;
400 return 1;
403 return 0;
405 #else
406 static inline int restore_vsx(struct task_struct *tsk) { return 0; }
407 #endif /* CONFIG_VSX */
409 #ifdef CONFIG_SPE
410 void giveup_spe(struct task_struct *tsk)
412 check_if_tm_restore_required(tsk);
414 msr_check_and_set(MSR_SPE);
415 __giveup_spe(tsk);
416 msr_check_and_clear(MSR_SPE);
418 EXPORT_SYMBOL(giveup_spe);
420 void enable_kernel_spe(void)
422 WARN_ON(preemptible());
424 msr_check_and_set(MSR_SPE);
426 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
427 check_if_tm_restore_required(current);
428 __giveup_spe(current);
431 EXPORT_SYMBOL(enable_kernel_spe);
433 void flush_spe_to_thread(struct task_struct *tsk)
435 if (tsk->thread.regs) {
436 preempt_disable();
437 if (tsk->thread.regs->msr & MSR_SPE) {
438 BUG_ON(tsk != current);
439 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
440 giveup_spe(tsk);
442 preempt_enable();
445 #endif /* CONFIG_SPE */
447 static unsigned long msr_all_available;
449 static int __init init_msr_all_available(void)
451 #ifdef CONFIG_PPC_FPU
452 msr_all_available |= MSR_FP;
453 #endif
454 #ifdef CONFIG_ALTIVEC
455 if (cpu_has_feature(CPU_FTR_ALTIVEC))
456 msr_all_available |= MSR_VEC;
457 #endif
458 #ifdef CONFIG_VSX
459 if (cpu_has_feature(CPU_FTR_VSX))
460 msr_all_available |= MSR_VSX;
461 #endif
462 #ifdef CONFIG_SPE
463 if (cpu_has_feature(CPU_FTR_SPE))
464 msr_all_available |= MSR_SPE;
465 #endif
467 return 0;
469 early_initcall(init_msr_all_available);
471 void giveup_all(struct task_struct *tsk)
473 unsigned long usermsr;
475 if (!tsk->thread.regs)
476 return;
478 usermsr = tsk->thread.regs->msr;
480 if ((usermsr & msr_all_available) == 0)
481 return;
483 msr_check_and_set(msr_all_available);
484 check_if_tm_restore_required(tsk);
486 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
488 #ifdef CONFIG_PPC_FPU
489 if (usermsr & MSR_FP)
490 __giveup_fpu(tsk);
491 #endif
492 #ifdef CONFIG_ALTIVEC
493 if (usermsr & MSR_VEC)
494 __giveup_altivec(tsk);
495 #endif
496 #ifdef CONFIG_SPE
497 if (usermsr & MSR_SPE)
498 __giveup_spe(tsk);
499 #endif
501 msr_check_and_clear(msr_all_available);
503 EXPORT_SYMBOL(giveup_all);
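/*
 * Reload any FP/VMX/VSX state that has been flagged for lazy restore
 * (load_fp/load_vec) or that an active transaction requires, and set
 * the corresponding bits in regs->msr so the facilities are enabled
 * when we return to userspace.
 */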
505 void restore_math(struct pt_regs *regs)
507 unsigned long msr;
509 if (!msr_tm_active(regs->msr) &&
510 !current->thread.load_fp && !loadvec(current->thread))
511 return;
513 msr = regs->msr;
514 msr_check_and_set(msr_all_available);
517 * Only reload if the bit is not set in the user MSR; the bit being set
518 * indicates that the registers are hot
520 if ((!(msr & MSR_FP)) && restore_fp(current))
521 msr |= MSR_FP | current->thread.fpexc_mode;
523 if ((!(msr & MSR_VEC)) && restore_altivec(current))
524 msr |= MSR_VEC;
526 if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
527 restore_vsx(current)) {
528 msr |= MSR_VSX;
531 msr_check_and_clear(msr_all_available);
533 regs->msr = msr;
536 void save_all(struct task_struct *tsk)
538 unsigned long usermsr;
540 if (!tsk->thread.regs)
541 return;
543 usermsr = tsk->thread.regs->msr;
545 if ((usermsr & msr_all_available) == 0)
546 return;
548 msr_check_and_set(msr_all_available);
550 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
552 if (usermsr & MSR_FP)
553 save_fpu(tsk);
555 if (usermsr & MSR_VEC)
556 save_altivec(tsk);
558 if (usermsr & MSR_SPE)
559 __giveup_spe(tsk);
561 msr_check_and_clear(msr_all_available);
564 void flush_all_to_thread(struct task_struct *tsk)
566 if (tsk->thread.regs) {
567 preempt_disable();
568 BUG_ON(tsk != current);
569 save_all(tsk);
571 #ifdef CONFIG_SPE
572 if (tsk->thread.regs->msr & MSR_SPE)
573 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
574 #endif
576 preempt_enable();
579 EXPORT_SYMBOL(flush_all_to_thread);
581 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
582 void do_send_trap(struct pt_regs *regs, unsigned long address,
583 unsigned long error_code, int signal_code, int breakpt)
585 siginfo_t info;
587 current->thread.trap_nr = signal_code;
588 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
589 11, SIGSEGV) == NOTIFY_STOP)
590 return;
592 /* Deliver the signal to userspace */
593 info.si_signo = SIGTRAP;
594 info.si_errno = breakpt; /* breakpoint or watchpoint id */
595 info.si_code = signal_code;
596 info.si_addr = (void __user *)address;
597 force_sig_info(SIGTRAP, &info, current);
599 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
600 void do_break (struct pt_regs *regs, unsigned long address,
601 unsigned long error_code)
603 siginfo_t info;
605 current->thread.trap_nr = TRAP_HWBKPT;
606 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
607 11, SIGSEGV) == NOTIFY_STOP)
608 return;
610 if (debugger_break_match(regs))
611 return;
613 /* Clear the breakpoint */
614 hw_breakpoint_disable();
616 /* Deliver the signal to userspace */
617 info.si_signo = SIGTRAP;
618 info.si_errno = 0;
619 info.si_code = TRAP_HWBKPT;
620 info.si_addr = (void __user *)address;
621 force_sig_info(SIGTRAP, &info, current);
623 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
625 static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
627 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
629 * Set the debug registers back to their default "safe" values.
631 static void set_debug_reg_defaults(struct thread_struct *thread)
633 thread->debug.iac1 = thread->debug.iac2 = 0;
634 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
635 thread->debug.iac3 = thread->debug.iac4 = 0;
636 #endif
637 thread->debug.dac1 = thread->debug.dac2 = 0;
638 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
639 thread->debug.dvc1 = thread->debug.dvc2 = 0;
640 #endif
641 thread->debug.dbcr0 = 0;
642 #ifdef CONFIG_BOOKE
644 * Force User/Supervisor bits to 0b11 (user-only, MSR[PR]=1)
646 thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
647 DBCR1_IAC3US | DBCR1_IAC4US;
649 * Force Data Address Compare User/Supervisor bits to be User-only
650 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
652 thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
653 #else
654 thread->debug.dbcr1 = 0;
655 #endif
658 static void prime_debug_regs(struct debug_reg *debug)
661 * We could have inherited MSR_DE from userspace, since
662 * it doesn't get cleared on exception entry. Make sure
663 * MSR_DE is clear before we enable any debug events.
665 mtmsr(mfmsr() & ~MSR_DE);
667 mtspr(SPRN_IAC1, debug->iac1);
668 mtspr(SPRN_IAC2, debug->iac2);
669 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
670 mtspr(SPRN_IAC3, debug->iac3);
671 mtspr(SPRN_IAC4, debug->iac4);
672 #endif
673 mtspr(SPRN_DAC1, debug->dac1);
674 mtspr(SPRN_DAC2, debug->dac2);
675 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
676 mtspr(SPRN_DVC1, debug->dvc1);
677 mtspr(SPRN_DVC2, debug->dvc2);
678 #endif
679 mtspr(SPRN_DBCR0, debug->dbcr0);
680 mtspr(SPRN_DBCR1, debug->dbcr1);
681 #ifdef CONFIG_BOOKE
682 mtspr(SPRN_DBCR2, debug->dbcr2);
683 #endif
686 * If either the old or the new thread is making use of the
687 * debug registers, set the debug registers from the values
688 * stored in the new thread.
690 void switch_booke_debug_regs(struct debug_reg *new_debug)
692 if ((current->thread.debug.dbcr0 & DBCR0_IDM)
693 || (new_debug->dbcr0 & DBCR0_IDM))
694 prime_debug_regs(new_debug);
696 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
697 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
698 #ifndef CONFIG_HAVE_HW_BREAKPOINT
699 static void set_debug_reg_defaults(struct thread_struct *thread)
701 thread->hw_brk.address = 0;
702 thread->hw_brk.type = 0;
703 set_breakpoint(&thread->hw_brk);
705 #endif /* !CONFIG_HAVE_HW_BREAKPOINT */
706 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
708 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
709 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
711 mtspr(SPRN_DAC1, dabr);
712 #ifdef CONFIG_PPC_47x
713 isync();
714 #endif
715 return 0;
717 #elif defined(CONFIG_PPC_BOOK3S)
718 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
720 mtspr(SPRN_DABR, dabr);
721 if (cpu_has_feature(CPU_FTR_DABRX))
722 mtspr(SPRN_DABRX, dabrx);
723 return 0;
725 #elif defined(CONFIG_PPC_8xx)
726 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
728 unsigned long addr = dabr & ~HW_BRK_TYPE_DABR;
729 unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */
730 unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */
732 if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
733 lctrl1 |= 0xa0000;
734 else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
735 lctrl1 |= 0xf0000;
736 else if ((dabr & HW_BRK_TYPE_RDWR) == 0)
737 lctrl2 = 0;
739 mtspr(SPRN_LCTRL2, 0);
740 mtspr(SPRN_CMPE, addr);
741 mtspr(SPRN_CMPF, addr + 4);
742 mtspr(SPRN_LCTRL1, lctrl1);
743 mtspr(SPRN_LCTRL2, lctrl2);
745 return 0;
747 #else
748 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
750 return -EINVAL;
752 #endif
754 static inline int set_dabr(struct arch_hw_breakpoint *brk)
756 unsigned long dabr, dabrx;
758 dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
759 dabrx = ((brk->type >> 3) & 0x7);
761 if (ppc_md.set_dabr)
762 return ppc_md.set_dabr(dabr, dabrx);
764 return __set_dabr(dabr, dabrx);
767 static inline int set_dawr(struct arch_hw_breakpoint *brk)
769 unsigned long dawr, dawrx, mrd;
771 dawr = brk->address;
773 dawrx = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \
774 << (63 - 58); /* read/write bits */
775 dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \
776 << (63 - 59); /* translate */
777 dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \
778 >> 3; /* PRIM bits */
779 /* dawr length is stored in field MRD, bits 48:53. Matches range in
780 doublewords (64 bits) biased by -1, e.g. 0b000000=1DW and
781 0b111111=64DW.
782 brk->len is in bytes.
783 This aligns up to double word size, shifts and does the bias.
785 mrd = ((brk->len + 7) >> 3) - 1;
786 dawrx |= (mrd & 0x3f) << (63 - 53);
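/*
 * Worked example: an 8-byte breakpoint gives
 * mrd = ((8 + 7) >> 3) - 1 = 0, i.e. 0b000000 = match one doubleword.
 */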
788 if (ppc_md.set_dawr)
789 return ppc_md.set_dawr(dawr, dawrx);
790 mtspr(SPRN_DAWR, dawr);
791 mtspr(SPRN_DAWRX, dawrx);
792 return 0;
795 void __set_breakpoint(struct arch_hw_breakpoint *brk)
797 memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
799 if (cpu_has_feature(CPU_FTR_DAWR))
800 set_dawr(brk);
801 else
802 set_dabr(brk);
805 void set_breakpoint(struct arch_hw_breakpoint *brk)
807 preempt_disable();
808 __set_breakpoint(brk);
809 preempt_enable();
812 #ifdef CONFIG_PPC64
813 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
814 #endif
816 static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
817 struct arch_hw_breakpoint *b)
819 if (a->address != b->address)
820 return false;
821 if (a->type != b->type)
822 return false;
823 if (a->len != b->len)
824 return false;
825 return true;
828 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
830 static inline bool tm_enabled(struct task_struct *tsk)
832 return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
835 static void tm_reclaim_thread(struct thread_struct *thr,
836 struct thread_info *ti, uint8_t cause)
839 * Use the current MSR TM suspended bit to track if we have
840 * checkpointed state outstanding.
841 * On signal delivery, we'd normally reclaim the checkpointed
842 * state to obtain stack pointer (see: get_tm_stackpointer()).
843 * This will then directly return to userspace without going
844 * through __switch_to(). However, if the stack frame is bad,
845 * we need to exit this thread which calls __switch_to() which
846 * will again attempt to reclaim the already saved tm state.
847 * Hence we need to check that we've not already reclaimed
848 * this state.
849 * We do this using the current MSR, rather than tracking it in
850 * some specific thread_struct bit, as it has the additional
851 * benefit of checking for a potential TM bad thing exception.
853 if (!MSR_TM_SUSPENDED(mfmsr()))
854 return;
857 * If we are in a transaction and FP is off then we can't have
858 * used FP inside that transaction. Hence the checkpointed
859 * state is the same as the live state. We need to copy the
860 * live state to the checkpointed state so that when the
861 * transaction is restored, the checkpointed state is correct
862 * and the aborted transaction sees the correct state. We use
863 * ckpt_regs.msr here as that's what tm_reclaim will use to
864 * determine if it's going to write the checkpointed state or
865 * not. So either this will write the checkpointed registers,
866 * or reclaim will. Similarly for VMX.
868 if ((thr->ckpt_regs.msr & MSR_FP) == 0)
869 memcpy(&thr->ckfp_state, &thr->fp_state,
870 sizeof(struct thread_fp_state));
871 if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
872 memcpy(&thr->ckvr_state, &thr->vr_state,
873 sizeof(struct thread_vr_state));
875 giveup_all(container_of(thr, struct task_struct, thread));
877 tm_reclaim(thr, thr->ckpt_regs.msr, cause);
880 void tm_reclaim_current(uint8_t cause)
882 tm_enable();
883 tm_reclaim_thread(&current->thread, current_thread_info(), cause);
886 static inline void tm_reclaim_task(struct task_struct *tsk)
888 /* We have to work out if we're switching from/to a task that's in the
889 * middle of a transaction.
891 * In switching we need to maintain a 2nd register state as
892 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the
893 * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
894 * ckvr_state
896 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
898 struct thread_struct *thr = &tsk->thread;
900 if (!thr->regs)
901 return;
903 if (!MSR_TM_ACTIVE(thr->regs->msr))
904 goto out_and_saveregs;
906 TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
907 "ccr=%lx, msr=%lx, trap=%lx)\n",
908 tsk->pid, thr->regs->nip,
909 thr->regs->ccr, thr->regs->msr,
910 thr->regs->trap);
912 tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);
914 TM_DEBUG("--- tm_reclaim on pid %d complete\n",
915 tsk->pid);
917 out_and_saveregs:
918 /* Always save the regs here, even if a transaction's not active.
919 * This context-switches a thread's TM info SPRs. We do it here to
920 * be consistent with the restore path (in recheckpoint) which
921 * cannot happen later in _switch().
923 tm_save_sprs(thr);
926 extern void __tm_recheckpoint(struct thread_struct *thread,
927 unsigned long orig_msr);
929 void tm_recheckpoint(struct thread_struct *thread,
930 unsigned long orig_msr)
932 unsigned long flags;
934 if (!(thread->regs->msr & MSR_TM))
935 return;
937 /* We really can't be interrupted here as the TEXASR registers can't
938 * change and later in the trecheckpoint code, we have a userspace R1.
939 * So let's hard disable over this region.
941 local_irq_save(flags);
942 hard_irq_disable();
944 /* The TM SPRs are restored here, so that TEXASR.FS can be set
945 * before the trecheckpoint and no explosion occurs.
947 tm_restore_sprs(thread);
949 __tm_recheckpoint(thread, orig_msr);
951 local_irq_restore(flags);
954 static inline void tm_recheckpoint_new_task(struct task_struct *new)
956 unsigned long msr;
958 if (!cpu_has_feature(CPU_FTR_TM))
959 return;
961 /* Recheckpoint the registers of the thread we're about to switch to.
963 * If the task was using FP, we non-lazily reload both the original and
964 * the speculative FP register states. This is because the kernel
965 * doesn't see if/when a TM rollback occurs, so if we take an FP
966 * unavailable later, we are unable to determine which set of FP regs
967 * need to be restored.
969 if (!tm_enabled(new))
970 return;
972 if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
973 tm_restore_sprs(&new->thread);
974 return;
976 msr = new->thread.ckpt_regs.msr;
977 /* Recheckpoint to restore original checkpointed register state. */
978 TM_DEBUG("*** tm_recheckpoint of pid %d "
979 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
980 new->pid, new->thread.regs->msr, msr);
982 tm_recheckpoint(&new->thread, msr);
985 * The checkpointed state has been restored but the live state has
986 * not, ensure all the math functionality is turned off to trigger
987 * restore_math() to reload.
989 new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
991 TM_DEBUG("*** tm_recheckpoint of pid %d complete "
992 "(kernel msr 0x%lx)\n",
993 new->pid, mfmsr());
996 static inline void __switch_to_tm(struct task_struct *prev,
997 struct task_struct *new)
999 if (cpu_has_feature(CPU_FTR_TM)) {
1000 if (tm_enabled(prev) || tm_enabled(new))
1001 tm_enable();
1003 if (tm_enabled(prev)) {
1004 prev->thread.load_tm++;
1005 tm_reclaim_task(prev);
1006 if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
1007 prev->thread.regs->msr &= ~MSR_TM;
1010 tm_recheckpoint_new_task(new);
1015 * This is called if we are on the way out to userspace and the
1016 * TIF_RESTORE_TM flag is set. It checks if we need to reload
1017 * FP and/or vector state and does so if necessary.
1018 * If userspace is inside a transaction (whether active or
1019 * suspended) and FP/VMX/VSX instructions have ever been enabled
1020 * inside that transaction, then we have to keep them enabled
1021 * and keep the FP/VMX/VSX state loaded for as long as the transaction
1022 * continues. The reason is that if we didn't, and subsequently
1023 * got an FP/VMX/VSX unavailable interrupt inside a transaction,
1024 * we don't know whether it's the same transaction, and thus we
1025 * don't know which of the checkpointed state and the transactional
1026 * state to use.
1028 void restore_tm_state(struct pt_regs *regs)
1030 unsigned long msr_diff;
1033 * This is the only moment we should clear TIF_RESTORE_TM as
1034 * it is here that ckpt_regs.msr and pt_regs.msr become the same
1035 * again; anything else could lead to an incorrect ckpt_msr being
1036 * saved and therefore incorrect signal contexts.
1038 clear_thread_flag(TIF_RESTORE_TM);
1039 if (!MSR_TM_ACTIVE(regs->msr))
1040 return;
1042 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
1043 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
1045 /* Ensure that restore_math() will restore */
1046 if (msr_diff & MSR_FP)
1047 current->thread.load_fp = 1;
1048 #ifdef CONFIG_ALTIVEC
1049 if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
1050 current->thread.load_vec = 1;
1051 #endif
1052 restore_math(regs);
1054 regs->msr |= msr_diff;
1057 #else
1058 #define tm_recheckpoint_new_task(new)
1059 #define __switch_to_tm(prev, new)
1060 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1062 static inline void save_sprs(struct thread_struct *t)
1064 #ifdef CONFIG_ALTIVEC
1065 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1066 t->vrsave = mfspr(SPRN_VRSAVE);
1067 #endif
1068 #ifdef CONFIG_PPC_BOOK3S_64
1069 if (cpu_has_feature(CPU_FTR_DSCR))
1070 t->dscr = mfspr(SPRN_DSCR);
1072 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1073 t->bescr = mfspr(SPRN_BESCR);
1074 t->ebbhr = mfspr(SPRN_EBBHR);
1075 t->ebbrr = mfspr(SPRN_EBBRR);
1077 t->fscr = mfspr(SPRN_FSCR);
1080 * Note that the TAR is not available for use in the kernel.
1081 * (To provide this, the TAR should be backed up/restored on
1082 * exception entry/exit instead, and be in pt_regs. FIXME,
1083 * this should be in pt_regs anyway (for debug).)
1085 t->tar = mfspr(SPRN_TAR);
1087 #endif
1090 static inline void restore_sprs(struct thread_struct *old_thread,
1091 struct thread_struct *new_thread)
1093 #ifdef CONFIG_ALTIVEC
1094 if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
1095 old_thread->vrsave != new_thread->vrsave)
1096 mtspr(SPRN_VRSAVE, new_thread->vrsave);
1097 #endif
1098 #ifdef CONFIG_PPC_BOOK3S_64
1099 if (cpu_has_feature(CPU_FTR_DSCR)) {
1100 u64 dscr = get_paca()->dscr_default;
1101 if (new_thread->dscr_inherit)
1102 dscr = new_thread->dscr;
1104 if (old_thread->dscr != dscr)
1105 mtspr(SPRN_DSCR, dscr);
1108 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1109 if (old_thread->bescr != new_thread->bescr)
1110 mtspr(SPRN_BESCR, new_thread->bescr);
1111 if (old_thread->ebbhr != new_thread->ebbhr)
1112 mtspr(SPRN_EBBHR, new_thread->ebbhr);
1113 if (old_thread->ebbrr != new_thread->ebbrr)
1114 mtspr(SPRN_EBBRR, new_thread->ebbrr);
1116 if (old_thread->fscr != new_thread->fscr)
1117 mtspr(SPRN_FSCR, new_thread->fscr);
1119 if (old_thread->tar != new_thread->tar)
1120 mtspr(SPRN_TAR, new_thread->tar);
1122 #endif
1125 #ifdef CONFIG_PPC_BOOK3S_64
1126 #define CP_SIZE 128
1127 static const u8 dummy_copy_buffer[CP_SIZE] __attribute__((aligned(CP_SIZE)));
1128 #endif
1130 struct task_struct *__switch_to(struct task_struct *prev,
1131 struct task_struct *new)
1133 struct thread_struct *new_thread, *old_thread;
1134 struct task_struct *last;
1135 #ifdef CONFIG_PPC_BOOK3S_64
1136 struct ppc64_tlb_batch *batch;
1137 #endif
1139 new_thread = &new->thread;
1140 old_thread = &current->thread;
1142 WARN_ON(!irqs_disabled());
1144 #ifdef CONFIG_PPC64
1146 * Collect processor utilization data per process
1148 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
1149 struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
1150 long unsigned start_tb, current_tb;
1151 start_tb = old_thread->start_tb;
1152 cu->current_tb = current_tb = mfspr(SPRN_PURR);
1153 old_thread->accum_tb += (current_tb - start_tb);
1154 new_thread->start_tb = current_tb;
1156 #endif /* CONFIG_PPC64 */
1158 #ifdef CONFIG_PPC_STD_MMU_64
1159 batch = this_cpu_ptr(&ppc64_tlb_batch);
1160 if (batch->active) {
1161 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
1162 if (batch->index)
1163 __flush_tlb_pending(batch);
1164 batch->active = 0;
1166 #endif /* CONFIG_PPC_STD_MMU_64 */
1168 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1169 switch_booke_debug_regs(&new->thread.debug);
1170 #else
1172 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
1173 * schedule DABR
1175 #ifndef CONFIG_HAVE_HW_BREAKPOINT
1176 if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
1177 __set_breakpoint(&new->thread.hw_brk);
1178 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1179 #endif
1182 * We need to save SPRs before treclaim/trecheckpoint as these will
1183 * change a number of them.
1185 save_sprs(&prev->thread);
1187 /* Save FPU, Altivec, VSX and SPE state */
1188 giveup_all(prev);
1190 __switch_to_tm(prev, new);
1192 if (!radix_enabled()) {
1194 * We can't take a PMU exception inside _switch() since there
1195 * is a window where the kernel stack SLB and the kernel stack
1196 * are out of sync. Hard disable here.
1198 hard_irq_disable();
1202 * Call restore_sprs() before calling _switch(). If we move it after
1203 * _switch() then we miss out on calling it for new tasks. The reason
1204 * for this is we manually create a stack frame for new tasks that
1205 * directly returns through ret_from_fork() or
1206 * ret_from_kernel_thread(). See copy_thread() for details.
1208 restore_sprs(old_thread, new_thread);
1210 last = _switch(old_thread, new_thread);
1212 #ifdef CONFIG_PPC_STD_MMU_64
1213 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
1214 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
1215 batch = this_cpu_ptr(&ppc64_tlb_batch);
1216 batch->active = 1;
1219 if (current_thread_info()->task->thread.regs) {
1220 restore_math(current_thread_info()->task->thread.regs);
1223 * The copy-paste buffer can only store into foreign real
1224 * addresses, so unprivileged processes can not see the
1225 * data or use it in any way unless they have foreign real
1226 * mappings. We don't have a VAS driver that allocates those
1227 * yet, so no cpabort is required.
1229 if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
1231 * DD1 allows paste into normal system memory, so we
1232 * do an unpaired copy here to clear the buffer and
1233 * prevent a covert channel being set up.
1235 * cpabort is not used because it is quite expensive.
1237 asm volatile(PPC_COPY(%0, %1)
1238 : : "r"(dummy_copy_buffer), "r"(0));
1241 #endif /* CONFIG_PPC_STD_MMU_64 */
1243 return last;
1246 static int instructions_to_print = 16;
1248 static void show_instructions(struct pt_regs *regs)
1250 int i;
1251 unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
1252 sizeof(int));
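/*
 * With instructions_to_print == 16 the dump starts 12 instructions
 * (48 bytes) before regs->nip, so the faulting instruction lands
 * roughly in the middle of the output and is printed inside <>.
 */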
1254 printk("Instruction dump:");
1256 for (i = 0; i < instructions_to_print; i++) {
1257 int instr;
1259 if (!(i % 8))
1260 pr_cont("\n");
1262 #if !defined(CONFIG_BOOKE)
1263 /* If executing with the IMMU off, adjust pc rather
1264 * than print XXXXXXXX.
1266 if (!(regs->msr & MSR_IR))
1267 pc = (unsigned long)phys_to_virt(pc);
1268 #endif
1270 if (!__kernel_text_address(pc) ||
1271 probe_kernel_address((unsigned int __user *)pc, instr)) {
1272 pr_cont("XXXXXXXX ");
1273 } else {
1274 if (regs->nip == pc)
1275 pr_cont("<%08x> ", instr);
1276 else
1277 pr_cont("%08x ", instr);
1280 pc += sizeof(int);
1283 pr_cont("\n");
1286 struct regbit {
1287 unsigned long bit;
1288 const char *name;
1291 static struct regbit msr_bits[] = {
1292 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
1293 {MSR_SF, "SF"},
1294 {MSR_HV, "HV"},
1295 #endif
1296 {MSR_VEC, "VEC"},
1297 {MSR_VSX, "VSX"},
1298 #ifdef CONFIG_BOOKE
1299 {MSR_CE, "CE"},
1300 #endif
1301 {MSR_EE, "EE"},
1302 {MSR_PR, "PR"},
1303 {MSR_FP, "FP"},
1304 {MSR_ME, "ME"},
1305 #ifdef CONFIG_BOOKE
1306 {MSR_DE, "DE"},
1307 #else
1308 {MSR_SE, "SE"},
1309 {MSR_BE, "BE"},
1310 #endif
1311 {MSR_IR, "IR"},
1312 {MSR_DR, "DR"},
1313 {MSR_PMM, "PMM"},
1314 #ifndef CONFIG_BOOKE
1315 {MSR_RI, "RI"},
1316 {MSR_LE, "LE"},
1317 #endif
1318 {0, NULL}
1321 static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
1323 const char *s = "";
1325 for (; bits->bit; ++bits)
1326 if (val & bits->bit) {
1327 pr_cont("%s%s", s, bits->name);
1328 s = sep;
1332 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1333 static struct regbit msr_tm_bits[] = {
1334 {MSR_TS_T, "T"},
1335 {MSR_TS_S, "S"},
1336 {MSR_TM, "E"},
1337 {0, NULL}
1340 static void print_tm_bits(unsigned long val)
1343 * This only prints something if at least one of the TM bits is set.
1344 * Inside the TM[], the output means:
1345 * E: Enabled (bit 32)
1346 * S: Suspended (bit 33)
1347 * T: Transactional (bit 34)
1349 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
1350 pr_cont(",TM[");
1351 print_bits(val, msr_tm_bits, "");
1352 pr_cont("]");
1355 #else
1356 static void print_tm_bits(unsigned long val) {}
1357 #endif
1359 static void print_msr_bits(unsigned long val)
1361 pr_cont("<");
1362 print_bits(val, msr_bits, ",");
1363 print_tm_bits(val);
1364 pr_cont(">");
1367 #ifdef CONFIG_PPC64
1368 #define REG "%016lx"
1369 #define REGS_PER_LINE 4
1370 #define LAST_VOLATILE 13
1371 #else
1372 #define REG "%08lx"
1373 #define REGS_PER_LINE 8
1374 #define LAST_VOLATILE 12
1375 #endif
1377 void show_regs(struct pt_regs * regs)
1379 int i, trap;
1381 show_regs_print_info(KERN_DEFAULT);
1383 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
1384 regs->nip, regs->link, regs->ctr);
1385 printk("REGS: %p TRAP: %04lx %s (%s)\n",
1386 regs, regs->trap, print_tainted(), init_utsname()->release);
1387 printk("MSR: "REG" ", regs->msr);
1388 print_msr_bits(regs->msr);
1389 pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
1390 trap = TRAP(regs);
1391 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
1392 pr_cont("CFAR: "REG" ", regs->orig_gpr3);
1393 if (trap == 0x200 || trap == 0x300 || trap == 0x600)
1394 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1395 pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
1396 #else
1397 pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
1398 #endif
1399 #ifdef CONFIG_PPC64
1400 pr_cont("SOFTE: %ld ", regs->softe);
1401 #endif
1402 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1403 if (MSR_TM_ACTIVE(regs->msr))
1404 pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
1405 #endif
1407 for (i = 0; i < 32; i++) {
1408 if ((i % REGS_PER_LINE) == 0)
1409 pr_cont("\nGPR%02d: ", i);
1410 pr_cont(REG " ", regs->gpr[i]);
1411 if (i == LAST_VOLATILE && !FULL_REGS(regs))
1412 break;
1414 pr_cont("\n");
1415 #ifdef CONFIG_KALLSYMS
1417 * Lookup NIP late so we have the best chance of getting the
1418 * above info out without failing
1420 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
1421 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
1422 #endif
1423 show_stack(current, (unsigned long *) regs->gpr[1]);
1424 if (!user_mode(regs))
1425 show_instructions(regs);
1428 void flush_thread(void)
1430 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1431 flush_ptrace_hw_breakpoint(current);
1432 #else /* CONFIG_HAVE_HW_BREAKPOINT */
1433 set_debug_reg_defaults(&current->thread);
1434 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1437 void
1438 release_thread(struct task_struct *t)
1443 * this gets called so that we can store coprocessor state into memory and
1444 * copy the current task into the new thread.
1446 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
1448 flush_all_to_thread(src);
1450 * Flush TM state out so we can copy it. __switch_to_tm() does this
1451 * flush but it removes the checkpointed state from the current CPU and
1452 * transitions the CPU out of TM mode. Hence we need to call
1453 * tm_recheckpoint_new_task() (on the same task) to restore the
1454 * checkpointed state back and the TM mode.
1456 * Can't pass dst because it isn't ready. Doesn't matter, passing
1457 * dst is only important for __switch_to()
1459 __switch_to_tm(src, src);
1461 *dst = *src;
1463 clear_task_ebb(dst);
1465 return 0;
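/*
 * Precompute the SLB VSID for the new task's kernel stack.  This is
 * consumed by the low-level _switch() path when it installs the SLB
 * entry for the new stack; radix MMUs do not use SLBs, so there is
 * nothing to do in that case.
 */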
1468 static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1470 #ifdef CONFIG_PPC_STD_MMU_64
1471 unsigned long sp_vsid;
1472 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1474 if (radix_enabled())
1475 return;
1477 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1478 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1479 << SLB_VSID_SHIFT_1T;
1480 else
1481 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1482 << SLB_VSID_SHIFT;
1483 sp_vsid |= SLB_VSID_KERNEL | llp;
1484 p->thread.ksp_vsid = sp_vsid;
1485 #endif
1489 * Copy a thread..
1493 * Copy architecture-specific thread state
1495 int copy_thread(unsigned long clone_flags, unsigned long usp,
1496 unsigned long kthread_arg, struct task_struct *p)
1498 struct pt_regs *childregs, *kregs;
1499 extern void ret_from_fork(void);
1500 extern void ret_from_kernel_thread(void);
1501 void (*f)(void);
1502 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
1503 struct thread_info *ti = task_thread_info(p);
1505 klp_init_thread_info(ti);
1507 /* Copy registers */
1508 sp -= sizeof(struct pt_regs);
1509 childregs = (struct pt_regs *) sp;
1510 if (unlikely(p->flags & PF_KTHREAD)) {
1511 /* kernel thread */
1512 memset(childregs, 0, sizeof(struct pt_regs));
1513 childregs->gpr[1] = sp + sizeof(struct pt_regs);
1514 /* function */
1515 if (usp)
1516 childregs->gpr[14] = ppc_function_entry((void *)usp);
1517 #ifdef CONFIG_PPC64
1518 clear_tsk_thread_flag(p, TIF_32BIT);
1519 childregs->softe = 1;
1520 #endif
1521 childregs->gpr[15] = kthread_arg;
1522 p->thread.regs = NULL; /* no user register state */
1523 ti->flags |= _TIF_RESTOREALL;
1524 f = ret_from_kernel_thread;
1525 } else {
1526 /* user thread */
1527 struct pt_regs *regs = current_pt_regs();
1528 CHECK_FULL_REGS(regs);
1529 *childregs = *regs;
1530 if (usp)
1531 childregs->gpr[1] = usp;
1532 p->thread.regs = childregs;
1533 childregs->gpr[3] = 0; /* Result from fork() */
1534 if (clone_flags & CLONE_SETTLS) {
1535 #ifdef CONFIG_PPC64
1536 if (!is_32bit_task())
1537 childregs->gpr[13] = childregs->gpr[6];
1538 else
1539 #endif
1540 childregs->gpr[2] = childregs->gpr[6];
1543 f = ret_from_fork;
1545 childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
1546 sp -= STACK_FRAME_OVERHEAD;
1549 * The way this works is that at some point in the future
1550 * some task will call _switch to switch to the new task.
1551 * That will pop off the stack frame created below and start
1552 * the new task running at ret_from_fork. The new task will
1553 * do some house keeping and then return from the fork or clone
1554 * system call, using the stack frame created above.
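/* Null-terminate the back chain so stack walkers stop at this frame. */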
1556 ((unsigned long *)sp)[0] = 0;
1557 sp -= sizeof(struct pt_regs);
1558 kregs = (struct pt_regs *) sp;
1559 sp -= STACK_FRAME_OVERHEAD;
1560 p->thread.ksp = sp;
1561 #ifdef CONFIG_PPC32
1562 p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
1563 _ALIGN_UP(sizeof(struct thread_info), 16);
1564 #endif
1565 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1566 p->thread.ptrace_bps[0] = NULL;
1567 #endif
1569 p->thread.fp_save_area = NULL;
1570 #ifdef CONFIG_ALTIVEC
1571 p->thread.vr_save_area = NULL;
1572 #endif
1574 setup_ksp_vsid(p, sp);
1576 #ifdef CONFIG_PPC64
1577 if (cpu_has_feature(CPU_FTR_DSCR)) {
1578 p->thread.dscr_inherit = current->thread.dscr_inherit;
1579 p->thread.dscr = mfspr(SPRN_DSCR);
1581 if (cpu_has_feature(CPU_FTR_HAS_PPR))
1582 p->thread.ppr = INIT_PPR;
1583 #endif
1584 kregs->nip = ppc_function_entry(f);
1585 return 0;
1589 * Set up a thread for executing a new program
1591 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1593 #ifdef CONFIG_PPC64
1594 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
1595 #endif
1598 * If we exec out of a kernel thread then thread.regs will not be
1599 * set. Do it now.
1601 if (!current->thread.regs) {
1602 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1603 current->thread.regs = regs - 1;
1606 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1608 * Clear any transactional state, we're exec()ing. The cause is
1609 * not important as there will never be a recheckpoint so it's not
1610 * user visible.
1612 if (MSR_TM_SUSPENDED(mfmsr()))
1613 tm_reclaim_current(0);
1614 #endif
1616 memset(regs->gpr, 0, sizeof(regs->gpr));
1617 regs->ctr = 0;
1618 regs->link = 0;
1619 regs->xer = 0;
1620 regs->ccr = 0;
1621 regs->gpr[1] = sp;
1624 * We have just cleared all the nonvolatile GPRs, so make
1625 * FULL_REGS(regs) return true. This is necessary to allow
1626 * ptrace to examine the thread immediately after exec.
1628 regs->trap &= ~1UL;
1630 #ifdef CONFIG_PPC32
1631 regs->mq = 0;
1632 regs->nip = start;
1633 regs->msr = MSR_USER;
1634 #else
1635 if (!is_32bit_task()) {
1636 unsigned long entry;
1638 if (is_elf2_task()) {
1639 /* Look ma, no function descriptors! */
1640 entry = start;
1643 * Ulrich says:
1644 * The latest iteration of the ABI requires that when
1645 * calling a function (at its global entry point),
1646 * the caller must ensure r12 holds the entry point
1647 * address (so that the function can quickly
1648 * establish addressability).
1650 regs->gpr[12] = start;
1651 /* Make sure that's restored on entry to userspace. */
1652 set_thread_flag(TIF_RESTOREALL);
1653 } else {
1654 unsigned long toc;
1656 /* start is a relocated pointer to the function
1657 * descriptor for the elf _start routine. The first
1658 * entry in the function descriptor is the entry
1659 * address of _start and the second entry is the TOC
1660 * value we need to use.
1662 __get_user(entry, (unsigned long __user *)start);
1663 __get_user(toc, (unsigned long __user *)start+1);
1665 /* Check whether the e_entry function descriptor entries
1666 * need to be relocated before we can use them.
1668 if (load_addr != 0) {
1669 entry += load_addr;
1670 toc += load_addr;
1672 regs->gpr[2] = toc;
1674 regs->nip = entry;
1675 regs->msr = MSR_USER64;
1676 } else {
1677 regs->nip = start;
1678 regs->gpr[2] = 0;
1679 regs->msr = MSR_USER32;
1681 #endif
1682 #ifdef CONFIG_VSX
1683 current->thread.used_vsr = 0;
1684 #endif
1685 current->thread.load_fp = 0;
1686 memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
1687 current->thread.fp_save_area = NULL;
1688 #ifdef CONFIG_ALTIVEC
1689 memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
1690 current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
1691 current->thread.vr_save_area = NULL;
1692 current->thread.vrsave = 0;
1693 current->thread.used_vr = 0;
1694 current->thread.load_vec = 0;
1695 #endif /* CONFIG_ALTIVEC */
1696 #ifdef CONFIG_SPE
1697 memset(current->thread.evr, 0, sizeof(current->thread.evr));
1698 current->thread.acc = 0;
1699 current->thread.spefscr = 0;
1700 current->thread.used_spe = 0;
1701 #endif /* CONFIG_SPE */
1702 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1703 current->thread.tm_tfhar = 0;
1704 current->thread.tm_texasr = 0;
1705 current->thread.tm_tfiar = 0;
1706 current->thread.load_tm = 0;
1707 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1709 EXPORT_SYMBOL(start_thread);
1711 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1712 | PR_FP_EXC_RES | PR_FP_EXC_INV)
1714 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1716 struct pt_regs *regs = tsk->thread.regs;
1718 /* This is a bit hairy. If we are an SPE enabled processor
1719 * (have embedded fp) we store the IEEE exception enable flags in
1720 * fpexc_mode. fpexc_mode is also used for setting FP exception
1721 * mode (asyn, precise, disabled) for 'Classic' FP. */
1722 if (val & PR_FP_EXC_SW_ENABLE) {
1723 #ifdef CONFIG_SPE
1724 if (cpu_has_feature(CPU_FTR_SPE)) {
1726 * When the sticky exception bits are set
1727 * directly by userspace, it must call prctl
1728 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1729 * in the existing prctl settings) or
1730 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1731 * the bits being set). <fenv.h> functions
1732 * saving and restoring the whole
1733 * floating-point environment need to do so
1734 * anyway to restore the prctl settings from
1735 * the saved environment.
1737 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1738 tsk->thread.fpexc_mode = val &
1739 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1740 return 0;
1741 } else {
1742 return -EINVAL;
1744 #else
1745 return -EINVAL;
1746 #endif
1749 /* on a CONFIG_SPE this does not hurt us. The bits that
1750 * __pack_fe01 use do not overlap with bits used for
1751 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
1752 * on CONFIG_SPE implementations are reserved so writing to
1753 * them does not change anything */
1754 if (val > PR_FP_EXC_PRECISE)
1755 return -EINVAL;
1756 tsk->thread.fpexc_mode = __pack_fe01(val);
1757 if (regs != NULL && (regs->msr & MSR_FP) != 0)
1758 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
1759 | tsk->thread.fpexc_mode;
1760 return 0;
1763 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1765 unsigned int val;
1767 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
1768 #ifdef CONFIG_SPE
1769 if (cpu_has_feature(CPU_FTR_SPE)) {
1771 * When the sticky exception bits are set
1772 * directly by userspace, it must call prctl
1773 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1774 * in the existing prctl settings) or
1775 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1776 * the bits being set). <fenv.h> functions
1777 * saving and restoring the whole
1778 * floating-point environment need to do so
1779 * anyway to restore the prctl settings from
1780 * the saved environment.
1782 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1783 val = tsk->thread.fpexc_mode;
1784 } else
1785 return -EINVAL;
1786 #else
1787 return -EINVAL;
1788 #endif
1789 else
1790 val = __unpack_fe01(tsk->thread.fpexc_mode);
1791 return put_user(val, (unsigned int __user *) adr);
1794 int set_endian(struct task_struct *tsk, unsigned int val)
1796 struct pt_regs *regs = tsk->thread.regs;
1798 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1799 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1800 return -EINVAL;
1802 if (regs == NULL)
1803 return -EINVAL;
1805 if (val == PR_ENDIAN_BIG)
1806 regs->msr &= ~MSR_LE;
1807 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1808 regs->msr |= MSR_LE;
1809 else
1810 return -EINVAL;
1812 return 0;
1815 int get_endian(struct task_struct *tsk, unsigned long adr)
1817 struct pt_regs *regs = tsk->thread.regs;
1818 unsigned int val;
1820 if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
1821 !cpu_has_feature(CPU_FTR_REAL_LE))
1822 return -EINVAL;
1824 if (regs == NULL)
1825 return -EINVAL;
1827 if (regs->msr & MSR_LE) {
1828 if (cpu_has_feature(CPU_FTR_REAL_LE))
1829 val = PR_ENDIAN_LITTLE;
1830 else
1831 val = PR_ENDIAN_PPC_LITTLE;
1832 } else
1833 val = PR_ENDIAN_BIG;
1835 return put_user(val, (unsigned int __user *)adr);
1838 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1840 tsk->thread.align_ctl = val;
1841 return 0;
1844 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1846 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
1849 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
1850 unsigned long nbytes)
1852 unsigned long stack_page;
1853 unsigned long cpu = task_cpu(p);
1856 * Avoid crashing if the stack has overflowed and corrupted
1857 * task_cpu(p), which is in the thread_info struct.
1859 if (cpu < NR_CPUS && cpu_possible(cpu)) {
1860 stack_page = (unsigned long) hardirq_ctx[cpu];
1861 if (sp >= stack_page + sizeof(struct thread_struct)
1862 && sp <= stack_page + THREAD_SIZE - nbytes)
1863 return 1;
1865 stack_page = (unsigned long) softirq_ctx[cpu];
1866 if (sp >= stack_page + sizeof(struct thread_struct)
1867 && sp <= stack_page + THREAD_SIZE - nbytes)
1868 return 1;
1870 return 0;
1873 int validate_sp(unsigned long sp, struct task_struct *p,
1874 unsigned long nbytes)
1876 unsigned long stack_page = (unsigned long)task_stack_page(p);
1878 if (sp >= stack_page + sizeof(struct thread_struct)
1879 && sp <= stack_page + THREAD_SIZE - nbytes)
1880 return 1;
1882 return valid_irq_stack(sp, p, nbytes);
1885 EXPORT_SYMBOL(validate_sp);
1887 unsigned long get_wchan(struct task_struct *p)
1889 unsigned long ip, sp;
1890 int count = 0;
1892 if (!p || p == current || p->state == TASK_RUNNING)
1893 return 0;
1895 sp = p->thread.ksp;
1896 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1897 return 0;
1899 do {
1900 sp = *(unsigned long *)sp;
1901 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1902 return 0;
1903 if (count > 0) {
1904 ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
1905 if (!in_sched_functions(ip))
1906 return ip;
1908 } while (count++ < 16);
1909 return 0;
1912 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
1914 void show_stack(struct task_struct *tsk, unsigned long *stack)
1916 unsigned long sp, ip, lr, newsp;
1917 int count = 0;
1918 int firstframe = 1;
1919 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1920 int curr_frame = current->curr_ret_stack;
1921 extern void return_to_handler(void);
1922 unsigned long rth = (unsigned long)return_to_handler;
1923 #endif
1925 sp = (unsigned long) stack;
1926 if (tsk == NULL)
1927 tsk = current;
1928 if (sp == 0) {
1929 if (tsk == current)
1930 sp = current_stack_pointer();
1931 else
1932 sp = tsk->thread.ksp;
1935 lr = 0;
1936 printk("Call Trace:\n");
1937 do {
1938 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
1939 return;
1941 stack = (unsigned long *) sp;
1942 newsp = stack[0];
1943 ip = stack[STACK_FRAME_LR_SAVE];
1944 if (!firstframe || ip != lr) {
1945 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
1946 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1947 if ((ip == rth) && curr_frame >= 0) {
1948 pr_cont(" (%pS)",
1949 (void *)current->ret_stack[curr_frame].ret);
1950 curr_frame--;
1952 #endif
1953 if (firstframe)
1954 pr_cont(" (unreliable)");
1955 pr_cont("\n");
1957 firstframe = 0;
1960 * See if this is an exception frame.
1961 * We look for the "regshere" marker in the current frame.
1963 if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
1964 && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
1965 struct pt_regs *regs = (struct pt_regs *)
1966 (sp + STACK_FRAME_OVERHEAD);
1967 lr = regs->link;
1968 printk("--- interrupt: %lx at %pS\n LR = %pS\n",
1969 regs->trap, (void *)regs->nip, (void *)lr);
1970 firstframe = 1;
1973 sp = newsp;
1974 } while (count++ < kstack_depth_to_print);
1977 #ifdef CONFIG_PPC64
1978 /* Called with hard IRQs off */
1979 void notrace __ppc64_runlatch_on(void)
1981 struct thread_info *ti = current_thread_info();
1983 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
1985 * Least significant bit (RUN) is the only writable bit of
1986 * the CTRL register, so we can avoid mfspr. 2.06 is not the
1987 * earliest ISA where this is the case, but it's convenient.
1989 mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
1990 } else {
1991 unsigned long ctrl;
1994 * Some architectures (e.g., Cell) have writable fields other
1995 * than RUN, so do the read-modify-write.
1997 ctrl = mfspr(SPRN_CTRLF);
1998 ctrl |= CTRL_RUNLATCH;
1999 mtspr(SPRN_CTRLT, ctrl);
2002 ti->local_flags |= _TLF_RUNLATCH;
2005 /* Called with hard IRQs off */
2006 void notrace __ppc64_runlatch_off(void)
2008 struct thread_info *ti = current_thread_info();
2010 ti->local_flags &= ~_TLF_RUNLATCH;
2012 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2013 mtspr(SPRN_CTRLT, 0);
2014 } else {
2015 unsigned long ctrl;
2017 ctrl = mfspr(SPRN_CTRLF);
2018 ctrl &= ~CTRL_RUNLATCH;
2019 mtspr(SPRN_CTRLT, ctrl);
2022 #endif /* CONFIG_PPC64 */
2024 unsigned long arch_align_stack(unsigned long sp)
2026 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2027 sp -= get_random_int() & ~PAGE_MASK;
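/*
 * The random offset above is less than one page; the final mask
 * keeps the stack pointer 16-byte aligned.
 */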
2028 return sp & ~0xf;
2031 static inline unsigned long brk_rnd(void)
2033 unsigned long rnd = 0;
2035 /* 8MB for 32bit, 1GB for 64bit */
2036 if (is_32bit_task())
2037 rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
2038 else
2039 rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
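/*
 * rnd is a page count at this point; shifting by PAGE_SHIFT converts
 * it to bytes, giving the 8MB (32-bit) or 1GB (64-bit) ranges above.
 */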
2041 return rnd << PAGE_SHIFT;
2044 unsigned long arch_randomize_brk(struct mm_struct *mm)
2046 unsigned long base = mm->brk;
2047 unsigned long ret;
2049 #ifdef CONFIG_PPC_STD_MMU_64
2051 * If we are using 1TB segments and we are allowed to randomise
2052 * the heap, we can put it above 1TB so it is backed by a 1TB
2053 * segment. Otherwise the heap will be in the bottom 1TB
2054 * which always uses 256MB segments and this may result in a
2055 * performance penalty. We don't need to worry about radix. For
2056 * radix, mmu_highuser_ssize remains unchanged from 256MB.
2058 if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2059 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2060 #endif
2062 ret = PAGE_ALIGN(base + brk_rnd());
2064 if (ret < mm->brk)
2065 return mm->brk;
2067 return ret;