arch/powerpc/platforms/cell/pervasive.c
/*
 * CBE Pervasive Monitor and Debug
 *
 * (C) Copyright IBM Corporation 2005
 *
 * Authors: Maximino Aguilar (maguilar@us.ibm.com)
 *          Michael N. Day (mnday@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#undef DEBUG

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/kallsyms.h>

#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/pgtable.h>
#include <asm/reg.h>
#include <asm/cell-regs.h>

#include "pervasive.h"
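/*
 * Idle callback: drop the calling hardware thread into the CBE pause(0)
 * low-power state.  The thread stops executing until an enabled wake-up
 * event (external or decrementer interrupt) arrives; that wake-up is
 * delivered as a system reset and handled by cbe_system_reset_exception().
 */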
static void cbe_power_save(void)
{
	unsigned long ctrl, thread_switch_control;

	/*
	 * We need to hard disable interrupts, the local_irq_enable() done by
	 * our caller upon return will hard re-enable.
	 */
	hard_irq_disable();

	ctrl = mfspr(SPRN_CTRLF);

	/* Enable DEC and EE interrupt request */
	thread_switch_control  = mfspr(SPRN_TSC_CELL);
	thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;
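	/*
	 * Select the decrementer wake-up enable bit that matches the
	 * hardware thread we are currently running on (thread 0 or 1),
	 * as reported by the CTRL register.
	 */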
	switch (ctrl & CTRL_CT) {
	case CTRL_CT0:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
		break;
	case CTRL_CT1:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
		break;
	default:
		printk(KERN_WARNING "%s: unknown configuration\n",
			__func__);
		break;
	}
	mtspr(SPRN_TSC_CELL, thread_switch_control);

	/*
	 * go into low thread priority, medium priority will be
	 * restored for us after wake-up.
	 */
	HMT_low();

	/*
	 * atomically disable thread execution and runlatch.
	 * External and Decrementer exceptions are still handled when the
	 * thread is disabled but now enter in cbe_system_reset_exception()
	 */
	ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
	mtspr(SPRN_CTRLT, ctrl);
}
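/*
 * Wake-up from pause(0) arrives as a system reset exception; the wake
 * reason is encoded in the SRR1 bits saved in regs->msr.  Dispatch to
 * the matching handler and return 1, or return 0 to let the generic
 * code treat the event as a real system reset.
 */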
static int cbe_system_reset_exception(struct pt_regs *regs)
{
	switch (regs->msr & SRR1_WAKEMASK) {
	case SRR1_WAKEEE:
		do_IRQ(regs);
		break;
	case SRR1_WAKEDEC:
		timer_interrupt(regs);
		break;
	case SRR1_WAKEMT:
		return cbe_sysreset_hack();
#ifdef CONFIG_CBE_RAS
	case SRR1_WAKESYSERR:
		cbe_system_error_exception(regs);
		break;
	case SRR1_WAKETHERM:
		cbe_thermal_exception(regs);
		break;
#endif /* CONFIG_CBE_RAS */
	default:
		/* do system reset */
		return 0;
	}
	/* everything handled */
	return 1;
}
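/*
 * Platform setup: enable the pause(0) facility in each CPU's pervasive
 * (PMD) register block and install the idle and system reset callbacks.
 */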
void __init cbe_pervasive_init(void)
{
	int cpu;

	if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
		return;

	for_each_possible_cpu(cpu) {
		struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
		if (!regs)
			continue;

		/* Enable Pause(0) control bit */
		out_be64(&regs->pmcr, in_be64(&regs->pmcr) |
				      CBE_PMD_PAUSE_ZERO_CONTROL);
	}
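	/* Hook up the platform idle and wake-up (system reset) handlers. */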
	ppc_md.power_save = cbe_power_save;
	ppc_md.system_reset_exception = cbe_system_reset_exception;
}