/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * 04/11/17 Ashok Raj	<ashok.raj@intel.com> Added CPU Hotplug Support
 *
 * 2005-10-07 Keith Owens <kaos@sgi.com>
 *	      Add notify_die() hooks.
 */
#include <linux/cpu.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/personality.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/thread_info.h>
#include <linux/unistd.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kdebug.h>
#include <linux/utsname.h>
#include <linux/tracehook.h>
#include <linux/rcupdate.h>

#include <asm/delay.h>
#include <asm/kexec.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/switch_to.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif
void (*ia64_mark_idle)(int);

unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
void (*pm_idle) (void);
EXPORT_SYMBOL(pm_idle);
void (*pm_power_off) (void);
EXPORT_SYMBOL(pm_power_off);
void
ia64_do_show_stack (struct unw_frame_info *info, void *arg)
{
        unsigned long ip, sp, bsp;
        char buf[128];  /* don't make it so big that it overflows the stack! */

        printk("\nCall Trace:\n");
        do {
                unw_get_ip(info, &ip);
                if (ip == 0)
                        break;

                unw_get_sp(info, &sp);
                unw_get_bsp(info, &bsp);
                snprintf(buf, sizeof(buf),
                         " [<%016lx>] %%s\n"
                         "                                sp=%016lx bsp=%016lx\n",
                         ip, sp, bsp);
                print_symbol(buf, ip);
        } while (unw_unwind(info) >= 0);
}
void
show_stack (struct task_struct *task, unsigned long *sp)
{
        if (!task)
                unw_init_running(ia64_do_show_stack, NULL);
        else {
                struct unw_frame_info info;

                unw_init_from_blocked_task(&info, task);
                ia64_do_show_stack(&info, NULL);
        }
}
void
dump_stack (void)
{
        show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);
void
show_regs (struct pt_regs *regs)
{
        unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;

        print_modules();
        printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current),
                        smp_processor_id(), current->comm);
        printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]    %s (%s)\n",
               regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(),
               init_utsname()->release);
        print_symbol("ip is at %s\n", ip);
        printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
               regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
        printk("rnat: %016lx bsps: %016lx pr  : %016lx\n",
               regs->ar_rnat, regs->ar_bspstore, regs->pr);
        printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
               regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
        printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
        printk("b0  : %016lx b6  : %016lx b7  : %016lx\n", regs->b0, regs->b6, regs->b7);
        printk("f6  : %05lx%016lx f7  : %05lx%016lx\n",
               regs->f6.u.bits[1], regs->f6.u.bits[0],
               regs->f7.u.bits[1], regs->f7.u.bits[0]);
        printk("f8  : %05lx%016lx f9  : %05lx%016lx\n",
               regs->f8.u.bits[1], regs->f8.u.bits[0],
               regs->f9.u.bits[1], regs->f9.u.bits[0]);
        printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
               regs->f10.u.bits[1], regs->f10.u.bits[0],
               regs->f11.u.bits[1], regs->f11.u.bits[0]);

        printk("r1  : %016lx r2  : %016lx r3  : %016lx\n", regs->r1, regs->r2, regs->r3);
        printk("r8  : %016lx r9  : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10);
        printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, regs->r12, regs->r13);
        printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, regs->r15, regs->r16);
        printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, regs->r18, regs->r19);
        printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, regs->r21, regs->r22);
        printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, regs->r24, regs->r25);
        printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, regs->r27, regs->r28);
        printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, regs->r30, regs->r31);
        if (user_mode(regs)) {
                /* print the stacked registers */
                unsigned long val, *bsp, ndirty;
                int i, sof, is_nat = 0;

                sof = regs->cr_ifs & 0x7f;      /* size of frame */
                ndirty = (regs->loadrs >> 19);
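                /*
                 * Editor's note on the ">> 19" above (not in the original
                 * source): pt_regs->loadrs holds the 14-bit ar.rsc.loadrs
                 * field already shifted into bits 29..16, and that field
                 * counts dirty-partition bytes (always a multiple of 8).
                 * ">> 16" would extract the byte count; the extra ">> 3"
                 * converts bytes to 64-bit slots, hence ">> 19" overall.
                 * Example: a 3-slot dirty partition is stored as
                 * (3 * 8) << 16 == 0x180000, and 0x180000 >> 19 == 3.
                 */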
                bsp = ia64_rse_skip_regs((unsigned long *) regs->ar_bspstore, ndirty);
                for (i = 0; i < sof; ++i) {
                        get_user(val, (unsigned long __user *) ia64_rse_skip_regs(bsp, i));
                        printk("r%-3u:%c%016lx%s", 32 + i, is_nat ? '*' : ' ', val,
                               ((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
                }
        } else
                show_stack(NULL, NULL);
}
/*
 * local support for deprecated console_print
 */
void
console_print(const char *s)
{
        printk(KERN_EMERG "%s", s);
}
void
do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
{
        if (fsys_mode(current, &scr->pt)) {
                /*
                 * defer signal-handling etc. until we return to
                 * privilege-level 0.
                 */
                if (!ia64_psr(&scr->pt)->lp)
                        ia64_psr(&scr->pt)->lp = 1;
                return;
        }

#ifdef CONFIG_PERFMON
        if (current->thread.pfm_needs_checking)
                /*
                 * Note: pfm_handle_work() allows us to call it with
                 * interrupts disabled, and may enable interrupts within
                 * the function.
                 */
                pfm_handle_work();
#endif

        /* deal with pending signal delivery */
        if (test_thread_flag(TIF_SIGPENDING)) {
                local_irq_enable();     /* force interrupt enable */
                ia64_do_signal(scr, in_syscall);
        }

        if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) {
                local_irq_enable();     /* force interrupt enable */
                tracehook_notify_resume(&scr->pt);
        }

        /* copy user rbs to kernel rbs */
        if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) {
                local_irq_enable();     /* force interrupt enable */
                ia64_sync_krbs();
        }

        local_irq_disable();    /* force interrupt disable */
}
static int pal_halt        = 1;
static int can_do_pal_halt = 1;

static int __init nohalt_setup(char * str)
{
        pal_halt = can_do_pal_halt = 0;
        return 1;
}
__setup("nohalt", nohalt_setup);
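/*
 * Usage note (editor's addition, not in the original source): passing
 * "nohalt" on the kernel boot command line runs the setup hook above and
 * forces the idle loop to spin with cpu_relax() instead of entering a
 * PAL halt state, e.g.:
 *
 *	root=/dev/sda2 ro nohalt
 */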
void
update_pal_halt_status(int status)
{
        can_do_pal_halt = pal_halt && status;
}
/*
 * We use this if we don't have any better idle routine.
 */
static void
default_idle (void)
{
        local_irq_enable();
        while (!need_resched()) {
                if (can_do_pal_halt) {
                        local_irq_disable();
                        if (!need_resched()) {
                                safe_halt();
                        }
                        local_irq_enable();
                } else
                        cpu_relax();
        }
}
#ifdef CONFIG_HOTPLUG_CPU
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
        unsigned int this_cpu = smp_processor_id();

        /* Ack it */
        __get_cpu_var(cpu_state) = CPU_DEAD;

        local_irq_disable();
        idle_task_exit();
        ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
        /*
         * The above is a point of no return; the processor is
         * expected to be in the SAL loop now.
         */
        BUG();
}
#else
static inline void play_dead(void)
{
        BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
void __attribute__((noreturn))
cpu_idle (void)
{
        void (*mark_idle)(int) = ia64_mark_idle;
        int cpu = smp_processor_id();

        /* endless idle loop with no priority at all */
        while (1) {
                rcu_idle_enter();
                if (can_do_pal_halt) {
                        current_thread_info()->status &= ~TS_POLLING;
                        /*
                         * TS_POLLING-cleared state must be visible before we
                         * test NEED_RESCHED:
                         */
                        smp_mb();
                } else {
                        current_thread_info()->status |= TS_POLLING;
                }

                if (!need_resched()) {
                        void (*idle)(void);

                        if (mark_idle)
                                (*mark_idle)(1);

                        idle = pm_idle;
                        if (!idle)
                                idle = default_idle;
                        (*idle)();
                        if (mark_idle)
                                (*mark_idle)(0);
                }
                rcu_idle_exit();
                schedule_preempt_disabled();
                if (cpu_is_offline(cpu))
                        play_dead();
        }
}
void
ia64_save_extra (struct task_struct *task)
{
#ifdef CONFIG_PERFMON
        unsigned long info;
#endif

        if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
                ia64_save_debug_regs(&task->thread.dbr[0]);

#ifdef CONFIG_PERFMON
        if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
                pfm_save_regs(task);

        info = __get_cpu_var(pfm_syst_info);
        if (info & PFM_CPUINFO_SYST_WIDE)
                pfm_syst_wide_update_task(task, info, 0);
#endif
}
void
ia64_load_extra (struct task_struct *task)
{
#ifdef CONFIG_PERFMON
        unsigned long info;
#endif

        if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
                ia64_load_debug_regs(&task->thread.dbr[0]);

#ifdef CONFIG_PERFMON
        if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
                pfm_load_regs(task);

        info = __get_cpu_var(pfm_syst_info);
        if (info & PFM_CPUINFO_SYST_WIDE)
                pfm_syst_wide_update_task(task, info, 1);
#endif
}
/*
 * Copy the state of an ia-64 thread.
 *
 * We get here through the following call chain:
 *
 *	from user-level:	from kernel:
 *
 *	<clone syscall>		<some kernel call frames>
 *	sys_clone		   :
 *	do_fork			do_fork
 *	copy_thread		copy_thread
 *
 * This means that the stack layout is as follows:
 *
 *	+---------------------+ (highest addr)
 *	|   struct pt_regs    |
 *	+---------------------+
 *	| struct switch_stack |
 *	+---------------------+
 *	|                     |
 *	|    memory stack     |
 *	|                     | <-- sp (lowest addr)
 *	+---------------------+
 *
 * Observe that we copy the unat values that are in pt_regs and switch_stack.  Spilling an
 * integer to address X causes bit N in ar.unat to be set to the NaT bit of the register,
 * with N=(X & 0x1ff)/8.  Thus, copying the unat value preserves the NaT bits ONLY if the
 * pt_regs structure in the parent is congruent to that of the child, modulo 512.  Since
 * the stack is page aligned and the page size is at least 4KB, this is always the case,
 * so there is nothing to worry about.
 */
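/*
 * Editor's worked example of the formula above (not in the original
 * source): spilling a register to address X = 0xe00000000a1b3a8 sets
 * unat bit N = (X & 0x1ff)/8 = 0x1a8/8 = 53.  Because parent and child
 * pt_regs sit at the same offset within stacks that are congruent
 * modulo 512, each register's spill address maps to the same unat bit
 * in both tasks, which is why the copied unat word stays correct.
 */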
int
copy_thread(unsigned long clone_flags,
            unsigned long user_stack_base, unsigned long user_stack_size,
            struct task_struct *p)
{
        extern char ia64_ret_from_clone;
        struct switch_stack *child_stack, *stack;
        unsigned long rbs, child_rbs, rbs_size;
        struct pt_regs *child_ptregs;
        struct pt_regs *regs = current_pt_regs();
        int retval = 0;

        child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1;
        child_stack = (struct switch_stack *) child_ptregs - 1;

        rbs = (unsigned long) current + IA64_RBS_OFFSET;
        child_rbs = (unsigned long) p + IA64_RBS_OFFSET;

        /* copy parts of thread_struct: */
        p->thread.ksp = (unsigned long) child_stack - 16;
        /*
         * NOTE: The calling convention considers all floating point
         * registers in the high partition (fph) to be scratch.  Since
         * the only way to get to this point is through a system call,
         * we know that the values in fph are all dead.  Hence, there
         * is no need to inherit the fph state from the parent to the
         * child and all we have to do is to make sure that
         * IA64_THREAD_FPH_VALID is cleared in the child.
         *
         * XXX We could push this optimization a bit further by
         * clearing IA64_THREAD_FPH_VALID on ANY system call.
         * However, it's not clear this is worth doing.  Also, it
         * would be a slight deviation from the normal Linux system
         * call behavior where scratch registers are preserved across
         * system calls (unless used by the system call itself).
         */
#       define THREAD_FLAGS_TO_CLEAR    (IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID \
                                         | IA64_THREAD_PM_VALID)
#       define THREAD_FLAGS_TO_SET      0
        p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
                           | THREAD_FLAGS_TO_SET);

        ia64_drop_fpu(p);       /* don't pick up stale state from a CPU's fph */
        if (unlikely(p->flags & PF_KTHREAD)) {
                if (unlikely(!user_stack_base)) {
                        /* fork_idle() called us */
                        return 0;
                }
                memset(child_stack, 0, sizeof(*child_ptregs) + sizeof(*child_stack));
                child_stack->r4 = user_stack_base;      /* payload */
                child_stack->r5 = user_stack_size;      /* argument */
                /*
                 * Preserve PSR bits, except for bits 32-34 and 37-45,
                 * which we can't read.
                 */
                child_ptregs->cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
                /* mark as valid, empty frame */
                child_ptregs->cr_ifs = 1UL << 63;
                child_stack->ar_fpsr = child_ptregs->ar_fpsr
                        = ia64_getreg(_IA64_REG_AR_FPSR);
                child_stack->pr = (1 << PRED_KERNEL_STACK);
                child_stack->ar_bspstore = child_rbs;
                child_stack->b0 = (unsigned long) &ia64_ret_from_clone;

                /*
                 * Stop some PSR bits from being inherited.  The psr.up/psr.pp
                 * bits must be cleared on fork but inherited on execve();
                 * therefore we must specify them explicitly here and not
                 * include them in IA64_PSR_BITS_TO_CLEAR.
                 */
                child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
                                         & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));

                return 0;
        }
        stack = ((struct switch_stack *) regs) - 1;
        /* copy parent's switch_stack & pt_regs to child: */
        memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));

        /* copy the parent's register backing store to the child: */
        rbs_size = stack->ar_bspstore - rbs;
        memcpy((void *) child_rbs, (void *) rbs, rbs_size);
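        /*
         * Editor's note (assumption, not in the original source): rbs is
         * the base of the parent's kernel register backing store and
         * stack->ar_bspstore its current top, so rbs_size is the number
         * of backing-store bytes actually in use; only that much needs
         * to be replicated into the child's RBS.
         */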
        if (clone_flags & CLONE_SETTLS)
                child_ptregs->r13 = regs->r16;  /* see sys_clone2() in entry.S */
        if (user_stack_base) {
                child_ptregs->r12 = user_stack_base + user_stack_size - 16;
                child_ptregs->ar_bspstore = user_stack_base;
                child_ptregs->ar_rnat = 0;
                child_ptregs->loadrs = 0;
        }
        child_stack->ar_bspstore = child_rbs + rbs_size;
        child_stack->b0 = (unsigned long) &ia64_ret_from_clone;

        /*
         * Stop some PSR bits from being inherited.  The psr.up/psr.pp
         * bits must be cleared on fork but inherited on execve();
         * therefore we must specify them explicitly here and not
         * include them in IA64_PSR_BITS_TO_CLEAR.
         */
        child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
                                 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));

#ifdef CONFIG_PERFMON
        if (current->thread.pfm_context)
                pfm_inherit(p, child_ptregs);
#endif
        return retval;
}
void
do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
        unsigned long mask, sp, nat_bits = 0, ar_rnat, urbs_end, cfm;
        unsigned long uninitialized_var(ip);    /* GCC be quiet */
        elf_greg_t *dst = arg;
        struct pt_regs *pt;
        char nat;
        int i;

        memset(dst, 0, sizeof(elf_gregset_t));  /* don't leak any kernel bits to user-level */

        if (unw_unwind_to_user(info) < 0)
                return;

        unw_get_sp(info, &sp);
        pt = (struct pt_regs *) (sp + 16);

        urbs_end = ia64_get_user_rbs_end(task, pt, &cfm);

        if (ia64_sync_user_rbs(task, info->sw, pt->ar_bspstore, urbs_end) < 0)
                return;

        ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end),
                  &ar_rnat);

        /*
         * coredump format:
         *      r0-r31
         *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
         *      predicate registers (p0-p63)
         *      b0-b7
         *      ip cfm user-mask
         *      ar.rsc ar.bsp ar.bspstore ar.rnat
         *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
         */
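        /*
         * Editor's note: in dst[] index terms, the layout above is
         *      dst[0..31]  r0-r31          dst[32]  NaT bits
         *      dst[33]     predicates      dst[34..41]  b0-b7
         *      dst[42] ip  dst[43] cfm     dst[44]  user mask
         *      dst[45..56] the ar registers, in the order listed
         */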
        /* r0 is zero */
        for (i = 1, mask = (1UL << i); i < 32; ++i) {
                unw_get_gr(info, i, &dst[i], &nat);
                if (nat)
                        nat_bits |= mask;
                mask <<= 1;
        }
        dst[32] = nat_bits;
        unw_get_pr(info, &dst[33]);

        for (i = 0; i < 8; ++i)
                unw_get_br(info, i, &dst[34 + i]);

        unw_get_rp(info, &ip);
        dst[42] = ip + ia64_psr(pt)->ri;
        dst[43] = cfm;
        dst[44] = pt->cr_ipsr & IA64_PSR_UM;

        unw_get_ar(info, UNW_AR_RSC, &dst[45]);
        /*
         * For bsp and bspstore, unw_get_ar() would return the kernel
         * addresses, but we need the user-level addresses instead:
         */
        dst[46] = urbs_end;     /* note: by convention PT_AR_BSP points to the end of the urbs! */
        dst[47] = pt->ar_bspstore;
        dst[48] = ar_rnat;
        unw_get_ar(info, UNW_AR_CCV, &dst[49]);
        unw_get_ar(info, UNW_AR_UNAT, &dst[50]);
        unw_get_ar(info, UNW_AR_FPSR, &dst[51]);
        dst[52] = pt->ar_pfs;   /* UNW_AR_PFS == pt->cr_ifs for interrupt frames */
        unw_get_ar(info, UNW_AR_LC, &dst[53]);
        unw_get_ar(info, UNW_AR_EC, &dst[54]);
        unw_get_ar(info, UNW_AR_CSD, &dst[55]);
        unw_get_ar(info, UNW_AR_SSD, &dst[56]);
}
void
do_dump_task_fpu (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
        elf_fpreg_t *dst = arg;
        int i;

        memset(dst, 0, sizeof(elf_fpregset_t)); /* don't leak any "random" bits */

        if (unw_unwind_to_user(info) < 0)
                return;

        /* f0 is 0.0, f1 is 1.0 */

        for (i = 2; i < 32; ++i)
                unw_get_fr(info, i, dst + i);

        ia64_flush_fph(task);
        if ((task->thread.flags & IA64_THREAD_FPH_VALID) != 0)
                memcpy(dst + 32, task->thread.fph, 96*16);
}
void
do_copy_regs (struct unw_frame_info *info, void *arg)
{
        do_copy_task_regs(current, info, arg);
}

void
do_dump_fpu (struct unw_frame_info *info, void *arg)
{
        do_dump_task_fpu(current, info, arg);
}
void
ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
{
        unw_init_running(do_copy_regs, dst);
}

int
dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
{
        unw_init_running(do_dump_fpu, dst);
        return 1;       /* f0-f31 are always valid so we always return 1 */
}
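/*
 * Editor's note: ia64_elf_core_copy_regs() and dump_fpu() are the arch
 * hooks the ELF coredump code uses to fill the general-register and
 * floating-point note sections of a core file; each one just runs the
 * corresponding unwinder callback on the current task.
 */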
/*
 * Flush thread state.  This is called when a thread does an execve().
 */
void
flush_thread (void)
{
        /* drop floating-point and debug-register state if it exists: */
        current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
        ia64_drop_fpu(current);
}
/*
 * Clean up state associated with the current thread.  This is called when
 * the thread calls exit().
 */
void
exit_thread (void)
{
        ia64_drop_fpu(current);
#ifdef CONFIG_PERFMON
        /* if needed, stop monitoring and flush state to perfmon context */
        if (current->thread.pfm_context)
                pfm_exit_thread(current);

        /* free debug register resources */
        if (current->thread.flags & IA64_THREAD_DBG_VALID)
                pfm_release_debug_registers(current);
#endif
}
unsigned long
get_wchan (struct task_struct *p)
{
        struct unw_frame_info info;
        unsigned long ip;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        /*
         * Note: p may not be a blocked task (it could be current or
         * another process running on some other CPU).  Rather than
         * trying to determine if p is really blocked, we just assume
         * it's blocked and rely on the unwind routines to fail
         * gracefully if the process wasn't really blocked after all.
         */
        unw_init_from_blocked_task(&info, p);
        do {
                if (p->state == TASK_RUNNING)
                        return 0;
                if (unw_unwind(&info) < 0)
                        return 0;
                unw_get_ip(&info, &ip);
                if (!in_sched_functions(ip))
                        return ip;
        } while (count++ < 16);
        return 0;
}
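/*
 * Editor's note: get_wchan() above is what the /proc code consults to
 * report the symbol a sleeping task is blocked in; the 16-frame cap
 * bounds the unwind work on a corrupt or deeply nested stack.
 */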
void
cpu_halt (void)
{
        pal_power_mgmt_info_u_t power_info[8];
        unsigned long min_power;
        int i, min_power_state;

        if (ia64_pal_halt_info(power_info) != 0)
                return;

        min_power_state = 0;
        min_power = power_info[0].pal_power_mgmt_info_s.power_consumption;
        for (i = 1; i < 8; ++i)
                if (power_info[i].pal_power_mgmt_info_s.im
                    && power_info[i].pal_power_mgmt_info_s.power_consumption < min_power) {
                        min_power = power_info[i].pal_power_mgmt_info_s.power_consumption;
                        min_power_state = i;
                }

        while (1)
                ia64_pal_halt(min_power_state);
}
void machine_shutdown(void)
{
#ifdef CONFIG_HOTPLUG_CPU
        int cpu;

        for_each_online_cpu(cpu) {
                if (cpu != smp_processor_id())
                        cpu_down(cpu);
        }
#endif
#ifdef CONFIG_KEXEC
        kexec_disable_iosapic();
#endif
}
void
machine_restart (char *restart_cmd)
{
        (void) notify_die(DIE_MACHINE_RESTART, restart_cmd, NULL, 0, 0, 0);
        (*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL);
}

void
machine_halt (void)
{
        (void) notify_die(DIE_MACHINE_HALT, "", NULL, 0, 0, 0);
        cpu_halt();
}

void
machine_power_off (void)
{
        if (pm_power_off)
                pm_power_off();
        machine_halt();
}