drivers/watchdog/octeon-wdt-main.c
/*
 * Octeon Watchdog driver
 *
 * Copyright (C) 2007, 2008, 2009, 2010 Cavium Networks
 *
 * Converted to use WATCHDOG_CORE by Aaro Koskinen <aaro.koskinen@iki.fi>.
 *
 * Some parts derived from wdt.c
 *
 * (c) Copyright 1996-1997 Alan Cox <alan@lxorguk.ukuu.org.uk>,
 *     All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
 * warranty for any of this software. This material is provided
 * "AS-IS" and at no charge.
 *
 * (c) Copyright 1995 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * The OCTEON watchdog has a maximum timeout of 2^32 io_clock cycles.
 * For most systems this is less than 10 seconds, so to allow
 * software to request longer watchdog heartbeats, we maintain software
 * counters to count multiples of the base rate. If the system locks
 * up in such a manner that we cannot run the software counters, the
 * only result is a watchdog reset sooner than was requested. But
 * that is OK, because in this case userspace would likely not be able
 * to do anything anyhow.
 *
 * The hardware watchdog interval we call the period. The OCTEON
 * watchdog goes through several stages: after the first period an
 * irq is asserted, then if it is not reset, after the next period an NMI
 * is asserted, then after an additional period a chip wide soft reset
 * is issued. So for the software counters, we reset the watchdog after
 * each period and decrement the counter. But for the last two periods
 * we need to let the watchdog progress to the NMI stage, so we disable
 * the irq and let it proceed. Once in the NMI, we print the register
 * state to the serial port and then wait for the reset.
 *
 * A watchdog is maintained for each CPU in the system, that way if
 * one CPU suffers a lockup, we also get a register dump and reset.
 * The userspace ping resets the watchdog on all CPUs.
 *
 * Before userspace opens the watchdog device, we still run the
 * watchdogs to catch any lockups that may be kernel related.
 */
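/*
 * Worked example (illustrative only; the 800 MHz io_clock is an assumed
 * value, not taken from this file): one LEN unit of the hardware
 * watchdog is 65536 io_clock cycles, so the longest whole-second period
 * that fits in the 16-bit LEN field is 5 s (800e6 * 5 / 65536 = 61035,
 * while 6 s would need ~73242). A 60 second heartbeat is then split
 * into 60 / 5 = 12 periods; the last two periods are reserved for the
 * irq-off and NMI stages, leaving a software countdown of 10 pokes
 * between userspace pings.
 */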
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/watchdog.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/irq.h>

#include <asm/mipsregs.h>
#include <asm/uasm.h>

#include <asm/octeon/octeon.h>
/* The count needed to achieve timeout_sec. */
static unsigned int timeout_cnt;

/* The maximum period supported. */
static unsigned int max_timeout_sec;

/* The current period. */
static unsigned int timeout_sec;

/* Set to non-zero when userspace countdown mode is active. */
static int do_countdown;
static unsigned int countdown_reset;
static unsigned int per_cpu_countdown[NR_CPUS];

static cpumask_t irq_enabled_cpus;
#define WD_TIMO 60			/* Default heartbeat = 60 seconds */

static int heartbeat = WD_TIMO;
module_param(heartbeat, int, S_IRUGO);
MODULE_PARM_DESC(heartbeat,
		 "Watchdog heartbeat in seconds. (0 < heartbeat, default="
		 __MODULE_STRING(WD_TIMO) ")");

static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, S_IRUGO);
MODULE_PARM_DESC(nowayout,
		 "Watchdog cannot be stopped once started (default="
		 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
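/*
 * Typical usage (a sketch, not part of this file; assumes the module is
 * built as octeon-wdt):
 *
 *	modprobe octeon-wdt heartbeat=120 nowayout=1
 *
 * after which a userspace daemon keeps the system alive by periodically
 * writing to, or issuing WDIOC_KEEPALIVE on, the watchdog character
 * device registered through the watchdog core.
 */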
static u32 nmi_stage1_insns[64] __initdata;

/* We need one branch and therefore one relocation per target label. */
static struct uasm_label labels[5] __initdata;
static struct uasm_reloc relocs[5] __initdata;

enum label_id {
	label_enter_bootloader = 1
};

/* Some CP0 registers */
#define K0		26
#define C0_CVMMEMCTL	11, 7
#define C0_STATUS	12, 0
#define C0_EBASE	15, 1
#define C0_DESAVE	31, 0
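/*
 * Note: each C0_* macro above expands to a "register, select" pair as
 * consumed by the uasm_i_*c0() instruction builders, and K0 is general
 * purpose register $26 (k0), used here as a scratch register.
 */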
void octeon_wdt_nmi_stage2(void);
static void __init octeon_wdt_build_stage1(void)
{
	int i;
	int len;
	u32 *p = nmi_stage1_insns;
#ifdef CONFIG_HOTPLUG_CPU
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
#endif

	/*
	 * For the next few instructions running the debugger may
	 * cause corruption of k0 in the saved registers. Since we're
	 * about to crash, nobody probably cares.
	 *
	 * Save K0 into the debug scratch register.
	 */
	uasm_i_dmtc0(&p, K0, C0_DESAVE);

	uasm_i_mfc0(&p, K0, C0_STATUS);
#ifdef CONFIG_HOTPLUG_CPU
	if (octeon_bootloader_entry_addr)
		uasm_il_bbit0(&p, &r, K0, ilog2(ST0_NMI),
			      label_enter_bootloader);
#endif
	/* Force 64-bit addressing enabled */
	uasm_i_ori(&p, K0, K0, ST0_UX | ST0_SX | ST0_KX);
	uasm_i_mtc0(&p, K0, C0_STATUS);

#ifdef CONFIG_HOTPLUG_CPU
	if (octeon_bootloader_entry_addr) {
		uasm_i_mfc0(&p, K0, C0_EBASE);
		/* Coreid number in K0 */
		uasm_i_andi(&p, K0, K0, 0xf);
		/* 8 * coreid in bits 16-31 */
		uasm_i_dsll_safe(&p, K0, K0, 3 + 16);
		uasm_i_ori(&p, K0, K0, 0x8001);
		uasm_i_dsll_safe(&p, K0, K0, 16);
		uasm_i_ori(&p, K0, K0, 0x0700);
		uasm_i_drotr_safe(&p, K0, K0, 32);
		/*
		 * Should result in: 0x8001,0700,0000,8*coreid which is
		 * CVMX_CIU_WDOGX(coreid) - 0x0500
		 *
		 * Now ld K0, CVMX_CIU_WDOGX(coreid)
		 */
		uasm_i_ld(&p, K0, 0x500, K0);
		/*
		 * If bit one is set, handle the NMI as a watchdog event,
		 * otherwise transfer control to the bootloader.
		 */
		uasm_il_bbit0(&p, &r, K0, 1, label_enter_bootloader);
		uasm_i_nop(&p);
	}
#endif

	/* Clear Dcache so cvmseg works right. */
	uasm_i_cache(&p, 1, 0, 0);

	/* Use K0 to do a read/modify/write of CVMMEMCTL */
	uasm_i_dmfc0(&p, K0, C0_CVMMEMCTL);
	/* Clear out the size of CVMSEG */
	uasm_i_dins(&p, K0, 0, 0, 6);
	/* Set CVMSEG to its largest value */
	uasm_i_ori(&p, K0, K0, 0x1c0 | 54);
	/* Store the CVMMEMCTL value */
	uasm_i_dmtc0(&p, K0, C0_CVMMEMCTL);

	/* Load the address of the second stage handler */
	UASM_i_LA(&p, K0, (long)octeon_wdt_nmi_stage2);
	uasm_i_jr(&p, K0);
	uasm_i_dmfc0(&p, K0, C0_DESAVE);

#ifdef CONFIG_HOTPLUG_CPU
	if (octeon_bootloader_entry_addr) {
		uasm_build_label(&l, p, label_enter_bootloader);
		/* Jump to the bootloader and restore K0 */
		UASM_i_LA(&p, K0, (long)octeon_bootloader_entry_addr);
		uasm_i_jr(&p, K0);
		uasm_i_dmfc0(&p, K0, C0_DESAVE);
	}
#endif
	uasm_resolve_relocs(relocs, labels);

	len = (int)(p - nmi_stage1_insns);
	pr_debug("Synthesized NMI stage 1 handler (%d instructions)\n", len);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < len; i++)
		pr_debug("\t.word 0x%08x\n", nmi_stage1_insns[i]);
	pr_debug("\t.set pop\n");

	if (len > 32)
		panic("NMI stage 1 handler exceeds 32 instructions, was %d\n",
		      len);
}
static int cpu2core(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

static int core2cpu(int coreid)
{
#ifdef CONFIG_SMP
	return cpu_number_map(coreid);
#else
	return 0;
#endif
}
/**
 * octeon_wdt_poke_irq - Poke the watchdog when an interrupt is received
 *
 * @cpl:	Interrupt number; used to disable the irq once the
 *		per-CPU countdown has expired.
 * @dev_id:	Unused (the poke handler itself is passed as the cookie).
 *
 * Returns IRQ_HANDLED.
 */
static irqreturn_t octeon_wdt_poke_irq(int cpl, void *dev_id)
{
	unsigned int core = cvmx_get_core_num();
	int cpu = core2cpu(core);

	if (do_countdown) {
		if (per_cpu_countdown[cpu] > 0) {
			/* We're alive, poke the watchdog */
			cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1);
			per_cpu_countdown[cpu]--;
		} else {
			/* Bad news, you are about to reboot. */
			disable_irq_nosync(cpl);
			cpumask_clear_cpu(cpu, &irq_enabled_cpus);
		}
	} else {
		/* Not open, just ping away... */
		cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1);
	}
	return IRQ_HANDLED;
}
/* From setup.c */
extern int prom_putchar(char c);

/**
 * Write a string to the uart
 *
 * @str: String to write
 */
static void octeon_wdt_write_string(const char *str)
{
	/* Just loop writing one byte at a time */
	while (*str)
		prom_putchar(*str++);
}
/**
 * Write a hex number out of the uart
 *
 * @value: Number to display
 * @digits: Number of digits to print (1 to 16)
 */
static void octeon_wdt_write_hex(u64 value, int digits)
{
	int d;
	int v;

	for (d = 0; d < digits; d++) {
		v = (value >> ((digits - d - 1) * 4)) & 0xf;
		if (v >= 10)
			prom_putchar('a' + v - 10);
		else
			prom_putchar('0' + v);
	}
}
static const char reg_name[][3] = {
	"$0", "at", "v0", "v1", "a0", "a1", "a2", "a3",
	"a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
	"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
	"t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra"
};
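/*
 * Note: the names above follow the MIPS n64 ABI register convention
 * (eight argument registers a0-a7), matching how the second stage
 * handler lays out the 32 saved general purpose registers.
 */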
/**
 * NMI stage 3 handler. NMIs are handled in the following manner:
 * 1) The first NMI handler enables CVMSEG and transfers from
 * the bootbus region into normal memory. It is careful to not
 * destroy any registers.
 * 2) The second stage handler uses CVMSEG to save the registers
 * and create a stack for C code. It then calls the third level
 * handler with one argument, a pointer to the register values.
 * 3) The third, and final, level handler is the following C
 * function that prints out some useful information.
 *
 * @reg: Pointer to register state before the NMI
 */
void octeon_wdt_nmi_stage3(u64 reg[32])
{
	u64 i;

	unsigned int coreid = cvmx_get_core_num();
	/*
	 * Save status and cause early to get them before any changes
	 * might happen.
	 */
	u64 cp0_cause = read_c0_cause();
	u64 cp0_status = read_c0_status();
	u64 cp0_error_epc = read_c0_errorepc();
	u64 cp0_epc = read_c0_epc();

	/* Delay so output from all cores is not jumbled together. */
	__delay(100000000ull * coreid);

	octeon_wdt_write_string("\r\n*** NMI Watchdog interrupt on Core 0x");
	octeon_wdt_write_hex(coreid, 1);
	octeon_wdt_write_string(" ***\r\n");
	for (i = 0; i < 32; i++) {
		octeon_wdt_write_string("\t");
		octeon_wdt_write_string(reg_name[i]);
		octeon_wdt_write_string("\t0x");
		octeon_wdt_write_hex(reg[i], 16);
		if (i & 1)
			octeon_wdt_write_string("\r\n");
	}
	octeon_wdt_write_string("\terr_epc\t0x");
	octeon_wdt_write_hex(cp0_error_epc, 16);

	octeon_wdt_write_string("\tepc\t0x");
	octeon_wdt_write_hex(cp0_epc, 16);
	octeon_wdt_write_string("\r\n");

	octeon_wdt_write_string("\tstatus\t0x");
	octeon_wdt_write_hex(cp0_status, 16);
	octeon_wdt_write_string("\tcause\t0x");
	octeon_wdt_write_hex(cp0_cause, 16);
	octeon_wdt_write_string("\r\n");

	octeon_wdt_write_string("\tsum0\t0x");
	octeon_wdt_write_hex(cvmx_read_csr(CVMX_CIU_INTX_SUM0(coreid * 2)), 16);
	octeon_wdt_write_string("\ten0\t0x");
	octeon_wdt_write_hex(cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)), 16);
	octeon_wdt_write_string("\r\n");

	octeon_wdt_write_string("*** Chip soft reset soon ***\r\n");
}
static void octeon_wdt_disable_interrupt(int cpu)
{
	unsigned int core;
	unsigned int irq;
	union cvmx_ciu_wdogx ciu_wdog;

	core = cpu2core(cpu);

	irq = OCTEON_IRQ_WDOG0 + core;

	/* Poke the watchdog to clear out its state */
	cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1);

	/* Disable the hardware. */
	ciu_wdog.u64 = 0;
	cvmx_write_csr(CVMX_CIU_WDOGX(core), ciu_wdog.u64);

	free_irq(irq, octeon_wdt_poke_irq);
}
static void octeon_wdt_setup_interrupt(int cpu)
{
	unsigned int core;
	unsigned int irq;
	union cvmx_ciu_wdogx ciu_wdog;

	core = cpu2core(cpu);

	/* Disable it before doing anything with the interrupts. */
	ciu_wdog.u64 = 0;
	cvmx_write_csr(CVMX_CIU_WDOGX(core), ciu_wdog.u64);

	per_cpu_countdown[cpu] = countdown_reset;

	irq = OCTEON_IRQ_WDOG0 + core;

	if (request_irq(irq, octeon_wdt_poke_irq,
			IRQF_NO_THREAD, "octeon_wdt", octeon_wdt_poke_irq))
		panic("octeon_wdt: Couldn't obtain irq %d", irq);

	cpumask_set_cpu(cpu, &irq_enabled_cpus);

	/* Poke the watchdog to clear out its state */
	cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1);

	/* Finally enable the watchdog now that all handlers are installed */
	ciu_wdog.u64 = 0;
	ciu_wdog.s.len = timeout_cnt;
	ciu_wdog.s.mode = 3;	/* 3 = Interrupt + NMI + Soft-Reset */
	cvmx_write_csr(CVMX_CIU_WDOGX(core), ciu_wdog.u64);
}
static int octeon_wdt_cpu_callback(struct notifier_block *nfb,
				   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		octeon_wdt_disable_interrupt(cpu);
		break;
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		octeon_wdt_setup_interrupt(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
static int octeon_wdt_ping(struct watchdog_device __always_unused *wdog)
{
	int cpu;
	int coreid;

	for_each_online_cpu(cpu) {
		coreid = cpu2core(cpu);
		cvmx_write_csr(CVMX_CIU_PP_POKEX(coreid), 1);
		per_cpu_countdown[cpu] = countdown_reset;
		if ((countdown_reset || !do_countdown) &&
		    !cpumask_test_cpu(cpu, &irq_enabled_cpus)) {
			/* We have to enable the irq */
			int irq = OCTEON_IRQ_WDOG0 + coreid;

			enable_irq(irq);
			cpumask_set_cpu(cpu, &irq_enabled_cpus);
		}
	}
	return 0;
}
static void octeon_wdt_calc_parameters(int t)
{
	unsigned int periods;

	timeout_sec = max_timeout_sec;

	/*
	 * Find the largest interrupt period that can evenly divide
	 * the requested heartbeat time.
	 */
	while ((t % timeout_sec) != 0)
		timeout_sec--;

	periods = t / timeout_sec;

	/*
	 * The last two periods are after the irq is disabled, and
	 * then to the nmi, so we subtract them off.
	 */
	countdown_reset = periods > 2 ? periods - 2 : 0;
	heartbeat = t;

	/*
	 * One LEN unit of the hardware watchdog is 256 * 256 = 65536
	 * io_clock cycles (LEN is the upper 16 bits of a 24-bit
	 * decrementer that ticks every 256 cycles), hence the two
	 * >> 8 shifts.
	 */
	timeout_cnt = ((octeon_get_io_clock_rate() >> 8) * timeout_sec) >> 8;
}
static int octeon_wdt_set_timeout(struct watchdog_device *wdog,
				  unsigned int t)
{
	int cpu;
	int coreid;
	union cvmx_ciu_wdogx ciu_wdog;

	if (t == 0)
		return -1;

	octeon_wdt_calc_parameters(t);

	for_each_online_cpu(cpu) {
		coreid = cpu2core(cpu);
		cvmx_write_csr(CVMX_CIU_PP_POKEX(coreid), 1);
		ciu_wdog.u64 = 0;
		ciu_wdog.s.len = timeout_cnt;
		ciu_wdog.s.mode = 3;	/* 3 = Interrupt + NMI + Soft-Reset */
		cvmx_write_csr(CVMX_CIU_WDOGX(coreid), ciu_wdog.u64);
		cvmx_write_csr(CVMX_CIU_PP_POKEX(coreid), 1);
	}

	octeon_wdt_ping(wdog); /* Get the irqs back on. */
	return 0;
}
static int octeon_wdt_start(struct watchdog_device *wdog)
{
	octeon_wdt_ping(wdog);
	do_countdown = 1;
	return 0;
}

static int octeon_wdt_stop(struct watchdog_device *wdog)
{
	do_countdown = 0;
	octeon_wdt_ping(wdog);
	return 0;
}
static struct notifier_block octeon_wdt_cpu_notifier = {
	.notifier_call = octeon_wdt_cpu_callback,
};

static const struct watchdog_info octeon_wdt_info = {
	.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
	.identity = "OCTEON",
};

static const struct watchdog_ops octeon_wdt_ops = {
	.owner = THIS_MODULE,
	.start = octeon_wdt_start,
	.stop = octeon_wdt_stop,
	.ping = octeon_wdt_ping,
	.set_timeout = octeon_wdt_set_timeout,
};

static struct watchdog_device octeon_wdt = {
	.info = &octeon_wdt_info,
	.ops = &octeon_wdt_ops,
};
/**
 * Module/driver initialization.
 *
 * Returns zero on success.
 */
static int __init octeon_wdt_init(void)
{
	int i;
	int ret;
	int cpu;
	u64 *ptr;

	/*
	 * Watchdog time expiration length = The 16 bits of LEN
	 * represent the most significant bits of a 24 bit decrementer
	 * that decrements every 256 cycles.
	 *
	 * Try for a timeout of 5 sec, if that fails a smaller number
	 * of even seconds.
	 */
	max_timeout_sec = 6;
	do {
		max_timeout_sec--;
		timeout_cnt = ((octeon_get_io_clock_rate() >> 8) *
			       max_timeout_sec) >> 8;
	} while (timeout_cnt > 65535);

	BUG_ON(timeout_cnt == 0);

	octeon_wdt_calc_parameters(heartbeat);

	pr_info("Initial granularity %u sec\n", timeout_sec);

	octeon_wdt.timeout = timeout_sec;
	octeon_wdt.max_timeout = UINT_MAX;

	watchdog_set_nowayout(&octeon_wdt, nowayout);

	ret = watchdog_register_device(&octeon_wdt);
	if (ret) {
		pr_err("watchdog_register_device() failed: %d\n", ret);
		return ret;
	}

	/* Build the NMI handler ... */
	octeon_wdt_build_stage1();

	/* ... and install it. */
	ptr = (u64 *) nmi_stage1_insns;
	for (i = 0; i < 16; i++) {
		cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, i * 8);
		cvmx_write_csr(CVMX_MIO_BOOT_LOC_DAT, ptr[i]);
	}
	cvmx_write_csr(CVMX_MIO_BOOT_LOC_CFGX(0), 0x81fc0000);

	cpumask_clear(&irq_enabled_cpus);

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		octeon_wdt_setup_interrupt(cpu);

	__register_hotcpu_notifier(&octeon_wdt_cpu_notifier);
	cpu_notifier_register_done();

	return 0;
}
/**
 * Module/driver shutdown
 */
static void __exit octeon_wdt_cleanup(void)
{
	int cpu;

	watchdog_unregister_device(&octeon_wdt);

	cpu_notifier_register_begin();
	__unregister_hotcpu_notifier(&octeon_wdt_cpu_notifier);

	for_each_online_cpu(cpu) {
		int core = cpu2core(cpu);
		/* Disable the watchdog */
		cvmx_write_csr(CVMX_CIU_WDOGX(core), 0);
		/* Free the interrupt handler */
		free_irq(OCTEON_IRQ_WDOG0 + core, octeon_wdt_poke_irq);
	}

	cpu_notifier_register_done();

	/*
	 * Disable the boot-bus memory, the code it points to is soon
	 * to go missing.
	 */
	cvmx_write_csr(CVMX_MIO_BOOT_LOC_CFGX(0), 0);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon Watchdog driver.");
module_init(octeon_wdt_init);
module_exit(octeon_wdt_cleanup);