arch/blackfin/kernel/nmi.c
/*
 * Blackfin nmi_watchdog Driver
 *
 * Originally based on bfin_wdt.c
 * Copyright 2010-2010 Analog Devices Inc.
 *		Graff Yang <graf.yang@analog.com>
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Licensed under the GPL-2 or later.
 */
#include <linux/bitops.h>
#include <linux/hardirq.h>
#include <linux/syscore_ops.h>
#include <linux/pm.h>
#include <linux/nmi.h>
#include <linux/smp.h>
#include <linux/timer.h>
#include <asm/blackfin.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/bfin_watchdog.h>

#define DRV_NAME "nmi-wdt"

#define NMI_WDT_TIMEOUT 5		/* 5 seconds */
#define NMI_CHECK_TIMEOUT (4 * HZ)	/* 4 seconds in jiffies */
static int nmi_wdt_cpu = 1;

static unsigned int timeout = NMI_WDT_TIMEOUT;
static int nmi_active;

static unsigned short wdoga_ctl;
static unsigned int wdoga_cnt;
static struct corelock_slot saved_corelock;
static atomic_t nmi_touched[NR_CPUS];
static struct timer_list ntimer;
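
/*
 * Cross-core handshake flags used by do_nmi() below.  nmi_event is
 * placed in .l2.bss so it lands in on-chip L2 SRAM, which both cores
 * can see while they spin inside the NMI handler.
 */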
enum {
	COREA_ENTER_NMI = 0,
	COREA_EXIT_NMI,
	COREB_EXIT_NMI,
	NMI_EVENT_NR,
};
static unsigned long nmi_event __attribute__ ((__section__(".l2.bss")));

/* we are in NMI context, so non-atomic bit ops are safe */
static inline void set_nmi_event(int event)
{
	__set_bit(event, &nmi_event);
}

static inline void wait_nmi_event(int event)
{
	while (!test_bit(event, &nmi_event))
		barrier();
	__clear_bit(event, &nmi_event);
}
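
/*
 * Borrow watchdog A to raise an NMI on CoreA right away: save its
 * current setup, then re-arm it with a zero count and ICTL_NMI so it
 * expires immediately.  restore_corea_nmi() undoes this afterwards.
 */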
static inline void send_corea_nmi(void)
{
	wdoga_ctl = bfin_read_WDOGA_CTL();
	wdoga_cnt = bfin_read_WDOGA_CNT();

	bfin_write_WDOGA_CTL(WDEN_DISABLE);
	bfin_write_WDOGA_CNT(0);
	bfin_write_WDOGA_CTL(WDEN_ENABLE | ICTL_NMI);
}

static inline void restore_corea_nmi(void)
{
	bfin_write_WDOGA_CTL(WDEN_DISABLE);
	bfin_write_WDOGA_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE);

	bfin_write_WDOGA_CNT(wdoga_cnt);
	bfin_write_WDOGA_CTL(wdoga_ctl);
}

static inline void save_corelock(void)
{
	saved_corelock = corelock;
	corelock.lock = 0;
}

static inline void restore_corelock(void)
{
	corelock = saved_corelock;
}

static inline void nmi_wdt_keepalive(void)
{
	bfin_write_WDOGB_STAT(0);
}

static inline void nmi_wdt_stop(void)
{
	bfin_write_WDOGB_CTL(WDEN_DISABLE);
}

/* before calling this function, you must stop the WDT */
static inline void nmi_wdt_clear(void)
{
	/* clear TRO bit, disable event generation */
	bfin_write_WDOGB_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE);
}

static inline void nmi_wdt_start(void)
{
	bfin_write_WDOGB_CTL(WDEN_ENABLE | ICTL_NMI);
}

static inline int nmi_wdt_running(void)
{
	return ((bfin_read_WDOGB_CTL() & WDEN_MASK) != WDEN_DISABLE);
}
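
/*
 * Program the watchdog B count from a timeout given in seconds.
 * WDOGB counts at SCLK, so the 32-bit count register caps the
 * timeout at 0xffffffff / sclk seconds (the "-1 / sclk" below).
 */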
static inline int nmi_wdt_set_timeout(unsigned long t)
{
	u32 cnt, max_t, sclk;
	int run;

	sclk = get_sclk();
	max_t = -1 / sclk;
	cnt = t * sclk;
	if (t > max_t) {
		pr_warning("NMI: timeout value is too large\n");
		return -EINVAL;
	}

	run = nmi_wdt_running();
	nmi_wdt_stop();
	bfin_write_WDOGB_CNT(cnt);
	if (run)
		nmi_wdt_start();

	timeout = t;

	return 0;
}
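
/*
 * Return 1 only if every online CPU has called touch_nmi_watchdog()
 * since the last check.  The other core updates its nmi_touched entry
 * through its own cache, so the line is invalidated before reading it
 * here.
 */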
int check_nmi_wdt_touched(void)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;
	cpumask_t mask;

	cpumask_copy(&mask, cpu_online_mask);
	if (!atomic_read(&nmi_touched[this_cpu]))
		return 0;

	atomic_set(&nmi_touched[this_cpu], 0);

	cpumask_clear_cpu(this_cpu, &mask);
	for_each_cpu(cpu, &mask) {
		invalidate_dcache_range((unsigned long)(&nmi_touched[cpu]),
				(unsigned long)(&nmi_touched[cpu]));
		if (!atomic_read(&nmi_touched[cpu]))
			return 0;
		atomic_set(&nmi_touched[cpu], 0);
	}

	return 1;
}
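
/*
 * Periodic check: feed watchdog B only while every CPU reports in.
 * If one core stops touching the watchdog, WDOGB eventually expires
 * and raises the NMI that is handled by do_nmi() below.
 */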
static void nmi_wdt_timer(unsigned long data)
{
	if (check_nmi_wdt_touched())
		nmi_wdt_keepalive();

	mod_timer(&ntimer, jiffies + NMI_CHECK_TIMEOUT);
}

static int __init init_nmi_wdt(void)
{
	nmi_wdt_set_timeout(timeout);
	nmi_wdt_start();
	nmi_active = true;

	init_timer(&ntimer);
	ntimer.function = nmi_wdt_timer;
	ntimer.expires = jiffies + NMI_CHECK_TIMEOUT;
	add_timer(&ntimer);

	pr_info("nmi_wdt: initialized: timeout=%d sec\n", timeout);
	return 0;
}
device_initcall(init_nmi_wdt);
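
/*
 * Blackfin implementation of the generic touch_nmi_watchdog() hook
 * declared in <linux/nmi.h>: mark the calling CPU as alive for the
 * next timer check.
 */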
void touch_nmi_watchdog(void)
{
	atomic_set(&nmi_touched[smp_processor_id()], 1);
}

/* Suspend/resume support */
#ifdef CONFIG_PM
static int nmi_wdt_suspend(void)
{
	nmi_wdt_stop();
	return 0;
}

static void nmi_wdt_resume(void)
{
	if (nmi_active)
		nmi_wdt_start();
}

static struct syscore_ops nmi_syscore_ops = {
	.resume = nmi_wdt_resume,
	.suspend = nmi_wdt_suspend,
};

static int __init init_nmi_wdt_syscore(void)
{
	if (nmi_active)
		register_syscore_ops(&nmi_syscore_ops);

	return 0;
}
late_initcall(init_nmi_wdt_syscore);

#endif	/* CONFIG_PM */
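
/*
 * NMI entry point for both cores.  CoreB (nmi_wdt_cpu) takes the
 * watchdog B NMI first, silences WDOGB, and forwards an NMI to CoreA
 * through watchdog A.  CoreA dumps its state first while CoreB spins,
 * then CoreB dumps, with the nmi_event bits handing control back and
 * forth.  The corelock is saved and cleared around the dumps,
 * presumably so the dump code cannot block on a lock held by a stuck
 * core.
 */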
asmlinkage notrace void do_nmi(struct pt_regs *fp)
{
	unsigned int cpu = smp_processor_id();
	nmi_enter();

	cpu_pda[cpu].__nmi_count += 1;

	if (cpu == nmi_wdt_cpu) {
		/* CoreB goes here first */

		/* reload the WDOG_STAT */
		nmi_wdt_keepalive();

		/* clear nmi interrupt for CoreB */
		nmi_wdt_stop();
		nmi_wdt_clear();

		/* trigger NMI interrupt of CoreA */
		send_corea_nmi();

		/* wait for CoreA to enter NMI */
		wait_nmi_event(COREA_ENTER_NMI);

		/* recover WDOGA's settings */
		restore_corea_nmi();

		save_corelock();

		/* corelock is saved and cleared, CoreA is dumping messages */

		wait_nmi_event(COREA_EXIT_NMI);
	} else {
		/* OK, CoreA entered NMI */
		set_nmi_event(COREA_ENTER_NMI);
	}

	pr_emerg("\nNMI Watchdog detected LOCKUP, dump for CPU %d\n", cpu);
	dump_bfin_process(fp);
	dump_bfin_mem(fp);
	show_regs(fp);
	dump_bfin_trace_buffer();
	show_stack(current, (unsigned long *)fp);

	if (cpu == nmi_wdt_cpu) {
		pr_emerg("This fault is not recoverable, sorry!\n");

		/* CoreA dump finished, restore the corelock */
		restore_corelock();

		set_nmi_event(COREB_EXIT_NMI);
	} else {
		/* CoreA dump finished, notify CoreB that we are done */
		set_nmi_event(COREA_EXIT_NMI);

		/* synchronize with CoreB */
		wait_nmi_event(COREB_EXIT_NMI);
	}

	nmi_exit();
}