drivers/edac/mpc85xx_edac.c
1 /*
2 * Freescale MPC85xx Memory Controller kernel module
3 *
4 * Author: Dave Jiang <djiang@mvista.com>
5 *
6 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
7 * the terms of the GNU General Public License version 2. This program
8 * is licensed "as is" without any warranty of any kind, whether express
9 * or implied.
10 */
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/ctype.h>
16 #include <linux/io.h>
17 #include <linux/mod_devicetable.h>
18 #include <linux/edac.h>
19 #include <linux/smp.h>
20 #include <linux/gfp.h>
22 #include <linux/of_platform.h>
23 #include <linux/of_device.h>
24 #include "edac_module.h"
25 #include "edac_core.h"
26 #include "mpc85xx_edac.h"
28 static int edac_dev_idx;
29 #ifdef CONFIG_PCI
30 static int edac_pci_idx;
31 #endif
32 static int edac_mc_idx;
34 static u32 orig_ddr_err_disable;
35 static u32 orig_ddr_err_sbe;
38 * PCI Err defines
40 #ifdef CONFIG_PCI
41 static u32 orig_pci_err_cap_dr;
42 static u32 orig_pci_err_en;
43 #endif
45 static u32 orig_l2_err_disable;
46 #ifdef CONFIG_FSL_SOC_BOOKE
47 static u32 orig_hid1[2];
48 #endif
50 /************************ MC SYSFS parts ***********************************/
52 static ssize_t mpc85xx_mc_inject_data_hi_show(struct mem_ctl_info *mci,
53 char *data)
55 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
56 return sprintf(data, "0x%08x",
57 in_be32(pdata->mc_vbase +
58 MPC85XX_MC_DATA_ERR_INJECT_HI));
61 static ssize_t mpc85xx_mc_inject_data_lo_show(struct mem_ctl_info *mci,
62 char *data)
64 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
65 return sprintf(data, "0x%08x",
66 in_be32(pdata->mc_vbase +
67 MPC85XX_MC_DATA_ERR_INJECT_LO));
70 static ssize_t mpc85xx_mc_inject_ctrl_show(struct mem_ctl_info *mci, char *data)
72 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
73 return sprintf(data, "0x%08x",
74 in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT));
77 static ssize_t mpc85xx_mc_inject_data_hi_store(struct mem_ctl_info *mci,
78 const char *data, size_t count)
80 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
81 if (isdigit(*data)) {
82 out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI,
83 simple_strtoul(data, NULL, 0));
84 return count;
86 return 0;
89 static ssize_t mpc85xx_mc_inject_data_lo_store(struct mem_ctl_info *mci,
90 const char *data, size_t count)
92 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
93 if (isdigit(*data)) {
94 out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO,
95 simple_strtoul(data, NULL, 0));
96 return count;
98 return 0;
101 static ssize_t mpc85xx_mc_inject_ctrl_store(struct mem_ctl_info *mci,
102 const char *data, size_t count)
104 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
105 if (isdigit(*data)) {
106 out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT,
107 simple_strtoul(data, NULL, 0));
108 return count;
110 return 0;
113 static struct mcidev_sysfs_attribute mpc85xx_mc_sysfs_attributes[] = {
115 .attr = {
116 .name = "inject_data_hi",
117 .mode = (S_IRUGO | S_IWUSR)
119 .show = mpc85xx_mc_inject_data_hi_show,
120 .store = mpc85xx_mc_inject_data_hi_store},
122 .attr = {
123 .name = "inject_data_lo",
124 .mode = (S_IRUGO | S_IWUSR)
126 .show = mpc85xx_mc_inject_data_lo_show,
127 .store = mpc85xx_mc_inject_data_lo_store},
129 .attr = {
130 .name = "inject_ctrl",
131 .mode = (S_IRUGO | S_IWUSR)
133 .show = mpc85xx_mc_inject_ctrl_show,
134 .store = mpc85xx_mc_inject_ctrl_store},
136 /* End of list */
138 .attr = {.name = NULL}
142 static void mpc85xx_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
144 mci->mc_driver_sysfs_attributes = mpc85xx_mc_sysfs_attributes;
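/*
 * Usage note (illustrative, not part of the original driver): the three
 * attributes above expose the controller's data/ECC error-injection
 * registers under this memory controller's EDAC sysfs directory, so a
 * test pattern can be written from user space, e.g.
 * "echo 0x1 > .../inject_data_lo" followed by enabling injection via
 * "inject_ctrl".  The exact sysfs path and the meaning of the control
 * bits are EDAC-core and hardware specific; see the MPC85xx reference
 * manual for the ERR_INJECT register layouts.
 */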
147 /**************************** PCI Err device ***************************/
148 #ifdef CONFIG_PCI
150 static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
152 struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
153 u32 err_detect;
155 err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
157 /* master aborts can happen during PCI config cycles */
158 if (!(err_detect & ~(PCI_EDE_MULTI_ERR | PCI_EDE_MST_ABRT))) {
159 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
160 return;
163 printk(KERN_ERR "PCI error(s) detected\n");
164 printk(KERN_ERR "PCI/X ERR_DR register: %#08x\n", err_detect);
166 printk(KERN_ERR "PCI/X ERR_ATTRIB register: %#08x\n",
167 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ATTRIB));
168 printk(KERN_ERR "PCI/X ERR_ADDR register: %#08x\n",
169 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR));
170 printk(KERN_ERR "PCI/X ERR_EXT_ADDR register: %#08x\n",
171 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EXT_ADDR));
172 printk(KERN_ERR "PCI/X ERR_DL register: %#08x\n",
173 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DL));
174 printk(KERN_ERR "PCI/X ERR_DH register: %#08x\n",
175 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DH));
177 /* clear error bits */
178 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
180 if (err_detect & PCI_EDE_PERR_MASK)
181 edac_pci_handle_pe(pci, pci->ctl_name);
183 if ((err_detect & ~PCI_EDE_MULTI_ERR) & ~PCI_EDE_PERR_MASK)
184 edac_pci_handle_npe(pci, pci->ctl_name);
187 static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
189 struct edac_pci_ctl_info *pci = dev_id;
190 struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
191 u32 err_detect;
193 err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
195 if (!err_detect)
196 return IRQ_NONE;
198 mpc85xx_pci_check(pci);
200 return IRQ_HANDLED;
203 static int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
205 struct edac_pci_ctl_info *pci;
206 struct mpc85xx_pci_pdata *pdata;
207 struct resource r;
208 int res = 0;
210 if (!devres_open_group(&op->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
211 return -ENOMEM;
213 pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err");
214 if (!pci)
215 return -ENOMEM;
217 pdata = pci->pvt_info;
218 pdata->name = "mpc85xx_pci_err";
219 pdata->irq = NO_IRQ;
220 dev_set_drvdata(&op->dev, pci);
221 pci->dev = &op->dev;
222 pci->mod_name = EDAC_MOD_STR;
223 pci->ctl_name = pdata->name;
224 pci->dev_name = dev_name(&op->dev);
226 if (edac_op_state == EDAC_OPSTATE_POLL)
227 pci->edac_check = mpc85xx_pci_check;
229 pdata->edac_idx = edac_pci_idx++;
231 res = of_address_to_resource(op->dev.of_node, 0, &r);
232 if (res) {
233 printk(KERN_ERR "%s: Unable to get resource for "
234 "PCI err regs\n", __func__);
235 goto err;
238 /* we only need the error registers */
239 r.start += 0xe00;
241 if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
242 pdata->name)) {
243 printk(KERN_ERR "%s: Error while requesting mem region\n",
244 __func__);
245 res = -EBUSY;
246 goto err;
249 pdata->pci_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
250 if (!pdata->pci_vbase) {
251 printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
252 res = -ENOMEM;
253 goto err;
256 orig_pci_err_cap_dr =
257 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);
259 /* PCI master abort is expected during config cycles */
260 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);
262 orig_pci_err_en = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
264 /* disable master abort reporting */
265 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);
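/*
 * Note (added for clarity): writing ~0x40 leaves every other error
 * source enabled and masks only the master-abort bit, matching the 0x40
 * written to the capture-disable register above.
 */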
267 /* clear error bits */
268 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
270 if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
271 debugf3("%s(): failed edac_pci_add_device()\n", __func__);
272 goto err;
275 if (edac_op_state == EDAC_OPSTATE_INT) {
276 pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
277 res = devm_request_irq(&op->dev, pdata->irq,
278 mpc85xx_pci_isr, IRQF_DISABLED,
279 "[EDAC] PCI err", pci);
280 if (res < 0) {
281 printk(KERN_ERR
282 "%s: Unable to requiest irq %d for "
283 "MPC85xx PCI err\n", __func__, pdata->irq);
284 irq_dispose_mapping(pdata->irq);
285 res = -ENODEV;
286 goto err2;
289 printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
290 pdata->irq);
293 devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
294 debugf3("%s(): success\n", __func__);
295 printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
297 return 0;
299 err2:
300 edac_pci_del_device(&op->dev);
301 err:
302 edac_pci_free_ctl_info(pci);
303 devres_release_group(&op->dev, mpc85xx_pci_err_probe);
304 return res;
307 static int mpc85xx_pci_err_remove(struct platform_device *op)
309 struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
310 struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
312 debugf0("%s()\n", __func__);
314 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR,
315 orig_pci_err_cap_dr);
317 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en);
319 edac_pci_del_device(pci->dev);
321 if (edac_op_state == EDAC_OPSTATE_INT)
322 irq_dispose_mapping(pdata->irq);
324 edac_pci_free_ctl_info(pci);
326 return 0;
329 static struct of_device_id mpc85xx_pci_err_of_match[] = {
331 .compatible = "fsl,mpc8540-pcix",
334 .compatible = "fsl,mpc8540-pci",
338 MODULE_DEVICE_TABLE(of, mpc85xx_pci_err_of_match);
340 static struct platform_driver mpc85xx_pci_err_driver = {
341 .probe = mpc85xx_pci_err_probe,
342 .remove = __devexit_p(mpc85xx_pci_err_remove),
343 .driver = {
344 .name = "mpc85xx_pci_err",
345 .owner = THIS_MODULE,
346 .of_match_table = mpc85xx_pci_err_of_match,
350 #endif /* CONFIG_PCI */
352 /**************************** L2 Err device ***************************/
354 /************************ L2 SYSFS parts ***********************************/
356 static ssize_t mpc85xx_l2_inject_data_hi_show(struct edac_device_ctl_info
357 *edac_dev, char *data)
359 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
360 return sprintf(data, "0x%08x",
361 in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI));
364 static ssize_t mpc85xx_l2_inject_data_lo_show(struct edac_device_ctl_info
365 *edac_dev, char *data)
367 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
368 return sprintf(data, "0x%08x",
369 in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO));
372 static ssize_t mpc85xx_l2_inject_ctrl_show(struct edac_device_ctl_info
373 *edac_dev, char *data)
375 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
376 return sprintf(data, "0x%08x",
377 in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL));
380 static ssize_t mpc85xx_l2_inject_data_hi_store(struct edac_device_ctl_info
381 *edac_dev, const char *data,
382 size_t count)
384 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
385 if (isdigit(*data)) {
386 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI,
387 simple_strtoul(data, NULL, 0));
388 return count;
390 return 0;
393 static ssize_t mpc85xx_l2_inject_data_lo_store(struct edac_device_ctl_info
394 *edac_dev, const char *data,
395 size_t count)
397 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
398 if (isdigit(*data)) {
399 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO,
400 simple_strtoul(data, NULL, 0));
401 return count;
403 return 0;
406 static ssize_t mpc85xx_l2_inject_ctrl_store(struct edac_device_ctl_info
407 *edac_dev, const char *data,
408 size_t count)
410 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
411 if (isdigit(*data)) {
412 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL,
413 simple_strtoul(data, NULL, 0));
414 return count;
416 return 0;
419 static struct edac_dev_sysfs_attribute mpc85xx_l2_sysfs_attributes[] = {
421 .attr = {
422 .name = "inject_data_hi",
423 .mode = (S_IRUGO | S_IWUSR)
425 .show = mpc85xx_l2_inject_data_hi_show,
426 .store = mpc85xx_l2_inject_data_hi_store},
428 .attr = {
429 .name = "inject_data_lo",
430 .mode = (S_IRUGO | S_IWUSR)
432 .show = mpc85xx_l2_inject_data_lo_show,
433 .store = mpc85xx_l2_inject_data_lo_store},
435 .attr = {
436 .name = "inject_ctrl",
437 .mode = (S_IRUGO | S_IWUSR)
439 .show = mpc85xx_l2_inject_ctrl_show,
440 .store = mpc85xx_l2_inject_ctrl_store},
442 /* End of list */
444 .attr = {.name = NULL}
448 static void mpc85xx_set_l2_sysfs_attributes(struct edac_device_ctl_info
449 *edac_dev)
451 edac_dev->sysfs_attributes = mpc85xx_l2_sysfs_attributes;
454 /***************************** L2 ops ***********************************/
456 static void mpc85xx_l2_check(struct edac_device_ctl_info *edac_dev)
458 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
459 u32 err_detect;
461 err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
463 if (!(err_detect & L2_EDE_MASK))
464 return;
466 printk(KERN_ERR "ECC Error in CPU L2 cache\n");
467 printk(KERN_ERR "L2 Error Detect Register: 0x%08x\n", err_detect);
468 printk(KERN_ERR "L2 Error Capture Data High Register: 0x%08x\n",
469 in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATAHI));
470 printk(KERN_ERR "L2 Error Capture Data Lo Register: 0x%08x\n",
471 in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATALO));
472 printk(KERN_ERR "L2 Error Syndrome Register: 0x%08x\n",
473 in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTECC));
474 printk(KERN_ERR "L2 Error Attributes Capture Register: 0x%08x\n",
475 in_be32(pdata->l2_vbase + MPC85XX_L2_ERRATTR));
476 printk(KERN_ERR "L2 Error Address Capture Register: 0x%08x\n",
477 in_be32(pdata->l2_vbase + MPC85XX_L2_ERRADDR));
479 /* clear error detect register */
480 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, err_detect);
482 if (err_detect & L2_EDE_CE_MASK)
483 edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
485 if (err_detect & L2_EDE_UE_MASK)
486 edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
489 static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
491 struct edac_device_ctl_info *edac_dev = dev_id;
492 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
493 u32 err_detect;
495 err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
497 if (!(err_detect & L2_EDE_MASK))
498 return IRQ_NONE;
500 mpc85xx_l2_check(edac_dev);
502 return IRQ_HANDLED;
505 static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
507 struct edac_device_ctl_info *edac_dev;
508 struct mpc85xx_l2_pdata *pdata;
509 struct resource r;
510 int res;
512 if (!devres_open_group(&op->dev, mpc85xx_l2_err_probe, GFP_KERNEL))
513 return -ENOMEM;
515 edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
516 "cpu", 1, "L", 1, 2, NULL, 0,
517 edac_dev_idx);
518 if (!edac_dev) {
519 devres_release_group(&op->dev, mpc85xx_l2_err_probe);
520 return -ENOMEM;
523 pdata = edac_dev->pvt_info;
524 pdata->name = "mpc85xx_l2_err";
525 pdata->irq = NO_IRQ;
526 edac_dev->dev = &op->dev;
527 dev_set_drvdata(edac_dev->dev, edac_dev);
528 edac_dev->ctl_name = pdata->name;
529 edac_dev->dev_name = pdata->name;
531 res = of_address_to_resource(op->dev.of_node, 0, &r);
532 if (res) {
533 printk(KERN_ERR "%s: Unable to get resource for "
534 "L2 err regs\n", __func__);
535 goto err;
538 /* we only need the error registers */
539 r.start += 0xe00;
541 if (!devm_request_mem_region(&op->dev, r.start,
542 r.end - r.start + 1, pdata->name)) {
543 printk(KERN_ERR "%s: Error while requesting mem region\n",
544 __func__);
545 res = -EBUSY;
546 goto err;
549 pdata->l2_vbase = devm_ioremap(&op->dev, r.start, r.end - r.start + 1);
550 if (!pdata->l2_vbase) {
551 printk(KERN_ERR "%s: Unable to setup L2 err regs\n", __func__);
552 res = -ENOMEM;
553 goto err;
556 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, ~0);
558 orig_l2_err_disable = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS);
560 /* clear the err_dis */
561 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, 0);
563 edac_dev->mod_name = EDAC_MOD_STR;
565 if (edac_op_state == EDAC_OPSTATE_POLL)
566 edac_dev->edac_check = mpc85xx_l2_check;
568 mpc85xx_set_l2_sysfs_attributes(edac_dev);
570 pdata->edac_idx = edac_dev_idx++;
572 if (edac_device_add_device(edac_dev) > 0) {
573 debugf3("%s(): failed edac_device_add_device()\n", __func__);
574 goto err;
577 if (edac_op_state == EDAC_OPSTATE_INT) {
578 pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
579 res = devm_request_irq(&op->dev, pdata->irq,
580 mpc85xx_l2_isr, IRQF_DISABLED,
581 "[EDAC] L2 err", edac_dev);
582 if (res < 0) {
583 printk(KERN_ERR
584 "%s: Unable to requiest irq %d for "
585 "MPC85xx L2 err\n", __func__, pdata->irq);
586 irq_dispose_mapping(pdata->irq);
587 res = -ENODEV;
588 goto err2;
591 printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for L2 Err\n",
592 pdata->irq);
594 edac_dev->op_state = OP_RUNNING_INTERRUPT;
596 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, L2_EIE_MASK);
599 devres_remove_group(&op->dev, mpc85xx_l2_err_probe);
601 debugf3("%s(): success\n", __func__);
602 printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");
604 return 0;
606 err2:
607 edac_device_del_device(&op->dev);
608 err:
609 devres_release_group(&op->dev, mpc85xx_l2_err_probe);
610 edac_device_free_ctl_info(edac_dev);
611 return res;
614 static int mpc85xx_l2_err_remove(struct platform_device *op)
616 struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
617 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
619 debugf0("%s()\n", __func__);
621 if (edac_op_state == EDAC_OPSTATE_INT) {
622 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
623 irq_dispose_mapping(pdata->irq);
626 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable);
627 edac_device_del_device(&op->dev);
628 edac_device_free_ctl_info(edac_dev);
629 return 0;
632 static struct of_device_id mpc85xx_l2_err_of_match[] = {
633 /* deprecate the fsl,85.. forms in the future, 2.6.30? */
634 { .compatible = "fsl,8540-l2-cache-controller", },
635 { .compatible = "fsl,8541-l2-cache-controller", },
636 { .compatible = "fsl,8544-l2-cache-controller", },
637 { .compatible = "fsl,8548-l2-cache-controller", },
638 { .compatible = "fsl,8555-l2-cache-controller", },
639 { .compatible = "fsl,8568-l2-cache-controller", },
640 { .compatible = "fsl,mpc8536-l2-cache-controller", },
641 { .compatible = "fsl,mpc8540-l2-cache-controller", },
642 { .compatible = "fsl,mpc8541-l2-cache-controller", },
643 { .compatible = "fsl,mpc8544-l2-cache-controller", },
644 { .compatible = "fsl,mpc8548-l2-cache-controller", },
645 { .compatible = "fsl,mpc8555-l2-cache-controller", },
646 { .compatible = "fsl,mpc8560-l2-cache-controller", },
647 { .compatible = "fsl,mpc8568-l2-cache-controller", },
648 { .compatible = "fsl,mpc8569-l2-cache-controller", },
649 { .compatible = "fsl,mpc8572-l2-cache-controller", },
650 { .compatible = "fsl,p1020-l2-cache-controller", },
651 { .compatible = "fsl,p1021-l2-cache-controller", },
652 { .compatible = "fsl,p2020-l2-cache-controller", },
655 MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
657 static struct platform_driver mpc85xx_l2_err_driver = {
658 .probe = mpc85xx_l2_err_probe,
659 .remove = mpc85xx_l2_err_remove,
660 .driver = {
661 .name = "mpc85xx_l2_err",
662 .owner = THIS_MODULE,
663 .of_match_table = mpc85xx_l2_err_of_match,
667 /**************************** MC Err device ***************************/
670 * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
671 * MPC8572 User's Manual. Each line represents a syndrome bit column as a
672 * 64-bit value, but split into an upper and lower 32-bit chunk. The labels
673 * below correspond to Freescale's manuals.
675 static unsigned int ecc_table[16] = {
676 /* MSB LSB */
677 /* [0:31] [32:63] */
678 0xf00fe11e, 0xc33c0ff7, /* Syndrome bit 7 */
679 0x00ff00ff, 0x00fff0ff,
680 0x0f0f0f0f, 0x0f0fff00,
681 0x11113333, 0x7777000f,
682 0x22224444, 0x8888222f,
683 0x44448888, 0xffff4441,
684 0x8888ffff, 0x11118882,
685 0xffff1111, 0x22221114, /* Syndrome bit 0 */
689 * Calculate the correct ECC value for a 64-bit value specified by high:low
691 static u8 calculate_ecc(u32 high, u32 low)
693 u32 mask_low;
694 u32 mask_high;
695 int bit_cnt;
696 u8 ecc = 0;
697 int i;
698 int j;
700 for (i = 0; i < 8; i++) {
701 mask_high = ecc_table[i * 2];
702 mask_low = ecc_table[i * 2 + 1];
703 bit_cnt = 0;
705 for (j = 0; j < 32; j++) {
706 if ((mask_high >> j) & 1)
707 bit_cnt ^= (high >> j) & 1;
708 if ((mask_low >> j) & 1)
709 bit_cnt ^= (low >> j) & 1;
712 ecc |= bit_cnt << i;
715 return ecc;
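/*
 * Note (added for clarity): each of the eight ECC bits computed above is
 * the even parity (XOR) of the data bits selected by one mask pair in
 * ecc_table[]; bit_cnt toggles once for every selected data bit that is
 * set, so it ends up holding that row's parity.
 */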
719 * Create the syndrome code which is generated if the data line specified by
720 * 'bit' failed, e.g. generate the 8-bit codes seen in Table 8-55 in the MPC8641
721 * User's Manual and 9-61 in the MPC8572 User's Manual.
723 static u8 syndrome_from_bit(unsigned int bit) {
724 int i;
725 u8 syndrome = 0;
728 * Cycle through the upper or lower 32-bit portion of each value in
729 * ecc_table depending on if 'bit' is in the upper or lower half of
730 * 64-bit data.
732 for (i = bit < 32; i < 16; i += 2)
733 syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);
735 return syndrome;
739 * Decode data and ecc syndrome to determine what went wrong
740 * Note: This can only decode single-bit errors
742 static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
743 int *bad_data_bit, int *bad_ecc_bit)
745 int i;
746 u8 syndrome;
748 *bad_data_bit = -1;
749 *bad_ecc_bit = -1;
752 * Calculate the ECC of the captured data and XOR it with the captured
753 * ECC to find an ECC syndrome value we can search for
755 syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;
757 /* Check if a data line is stuck... */
758 for (i = 0; i < 64; i++) {
759 if (syndrome == syndrome_from_bit(i)) {
760 *bad_data_bit = i;
761 return;
765 /* If data is correct, check ECC bits for errors... */
766 for (i = 0; i < 8; i++) {
767 if ((syndrome >> i) & 0x1) {
768 *bad_ecc_bit = i;
769 return;
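/*
 * Illustrative self-test sketch (not part of the original driver): the
 * decode above works because the code is linear -- flipping a single
 * data bit 'b' changes the computed check bits by exactly
 * syndrome_from_bit(b).  A minimal check of that invariant, assuming it
 * were compiled into this file where calculate_ecc() and
 * syndrome_from_bit() are visible; mpc85xx_ecc_selftest is just an
 * illustrative name.
 */
static void __maybe_unused mpc85xx_ecc_selftest(u32 high, u32 low)
{
	u8 ref_ecc = calculate_ecc(high, low);
	unsigned int bit;

	for (bit = 0; bit < 64; bit++) {
		u32 h = high, l = low;

		if (bit < 32)
			l ^= 1U << bit;
		else
			h ^= 1U << (bit - 32);

		/* XOR of good and corrupted ECC is the single-bit syndrome */
		WARN_ON((u8)(calculate_ecc(h, l) ^ ref_ecc) !=
			syndrome_from_bit(bit));
	}
}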
774 static void mpc85xx_mc_check(struct mem_ctl_info *mci)
776 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
777 struct csrow_info *csrow;
778 u32 bus_width;
779 u32 err_detect;
780 u32 syndrome;
781 u32 err_addr;
782 u32 pfn;
783 int row_index;
784 u32 cap_high;
785 u32 cap_low;
786 int bad_data_bit;
787 int bad_ecc_bit;
789 err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
790 if (!err_detect)
791 return;
793 mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
794 err_detect);
796 /* no more processing if not ECC bit errors */
797 if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
798 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
799 return;
802 syndrome = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ECC);
804 /* Mask off appropriate bits of syndrome based on bus width */
805 bus_width = (in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG) &
806 DSC_DBW_MASK) ? 32 : 64;
807 if (bus_width == 64)
808 syndrome &= 0xff;
809 else
810 syndrome &= 0xffff;
812 err_addr = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ADDRESS);
813 pfn = err_addr >> PAGE_SHIFT;
815 for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
816 csrow = &mci->csrows[row_index];
817 if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
818 break;
821 cap_high = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_HI);
822 cap_low = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_LO);
825 * Analyze single-bit errors on 64-bit wide buses
826 * TODO: Add support for 32-bit wide buses
828 if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
829 sbe_ecc_decode(cap_high, cap_low, syndrome,
830 &bad_data_bit, &bad_ecc_bit);
832 if (bad_data_bit != -1)
833 mpc85xx_mc_printk(mci, KERN_ERR,
834 "Faulty Data bit: %d\n", bad_data_bit);
835 if (bad_ecc_bit != -1)
836 mpc85xx_mc_printk(mci, KERN_ERR,
837 "Faulty ECC bit: %d\n", bad_ecc_bit);
839 mpc85xx_mc_printk(mci, KERN_ERR,
840 "Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
841 cap_high ^ (1 << (bad_data_bit - 32)),
842 cap_low ^ (1 << bad_data_bit),
843 syndrome ^ (1 << bad_ecc_bit));
846 mpc85xx_mc_printk(mci, KERN_ERR,
847 "Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
848 cap_high, cap_low, syndrome);
849 mpc85xx_mc_printk(mci, KERN_ERR, "Err addr: %#8.8x\n", err_addr);
850 mpc85xx_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
852 /* we are out of range */
853 if (row_index == mci->nr_csrows)
854 mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
856 if (err_detect & DDR_EDE_SBE)
857 edac_mc_handle_ce(mci, pfn, err_addr & PAGE_MASK,
858 syndrome, row_index, 0, mci->ctl_name);
860 if (err_detect & DDR_EDE_MBE)
861 edac_mc_handle_ue(mci, pfn, err_addr & PAGE_MASK,
862 row_index, mci->ctl_name);
864 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
867 static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id)
869 struct mem_ctl_info *mci = dev_id;
870 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
871 u32 err_detect;
873 err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
874 if (!err_detect)
875 return IRQ_NONE;
877 mpc85xx_mc_check(mci);
879 return IRQ_HANDLED;
882 static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
884 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
885 struct csrow_info *csrow;
886 u32 sdram_ctl;
887 u32 sdtype;
888 enum mem_type mtype;
889 u32 cs_bnds;
890 int index;
892 sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
894 sdtype = sdram_ctl & DSC_SDTYPE_MASK;
895 if (sdram_ctl & DSC_RD_EN) {
896 switch (sdtype) {
897 case DSC_SDTYPE_DDR:
898 mtype = MEM_RDDR;
899 break;
900 case DSC_SDTYPE_DDR2:
901 mtype = MEM_RDDR2;
902 break;
903 case DSC_SDTYPE_DDR3:
904 mtype = MEM_RDDR3;
905 break;
906 default:
907 mtype = MEM_UNKNOWN;
908 break;
910 } else {
911 switch (sdtype) {
912 case DSC_SDTYPE_DDR:
913 mtype = MEM_DDR;
914 break;
915 case DSC_SDTYPE_DDR2:
916 mtype = MEM_DDR2;
917 break;
918 case DSC_SDTYPE_DDR3:
919 mtype = MEM_DDR3;
920 break;
921 default:
922 mtype = MEM_UNKNOWN;
923 break;
927 for (index = 0; index < mci->nr_csrows; index++) {
928 u32 start;
929 u32 end;
931 csrow = &mci->csrows[index];
932 cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
933 (index * MPC85XX_MC_CS_BNDS_OFS));
935 start = (cs_bnds & 0xffff0000) >> 16;
936 end = (cs_bnds & 0x0000ffff);
938 if (start == end)
939 continue; /* not populated */
941 start <<= (24 - PAGE_SHIFT);
942 end <<= (24 - PAGE_SHIFT);
943 end |= (1 << (24 - PAGE_SHIFT)) - 1;
945 csrow->first_page = start;
946 csrow->last_page = end;
947 csrow->nr_pages = end + 1 - start;
948 csrow->grain = 8;
949 csrow->mtype = mtype;
950 csrow->dtype = DEV_UNKNOWN;
951 if (sdram_ctl & DSC_X32_EN)
952 csrow->dtype = DEV_X32;
953 csrow->edac_mode = EDAC_SECDED;
957 static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
959 struct mem_ctl_info *mci;
960 struct mpc85xx_mc_pdata *pdata;
961 struct resource r;
962 u32 sdram_ctl;
963 int res;
965 if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
966 return -ENOMEM;
968 mci = edac_mc_alloc(sizeof(*pdata), 4, 1, edac_mc_idx);
969 if (!mci) {
970 devres_release_group(&op->dev, mpc85xx_mc_err_probe);
971 return -ENOMEM;
974 pdata = mci->pvt_info;
975 pdata->name = "mpc85xx_mc_err";
976 pdata->irq = NO_IRQ;
977 mci->dev = &op->dev;
978 pdata->edac_idx = edac_mc_idx++;
979 dev_set_drvdata(mci->dev, mci);
980 mci->ctl_name = pdata->name;
981 mci->dev_name = pdata->name;
983 res = of_address_to_resource(op->dev.of_node, 0, &r);
984 if (res) {
985 printk(KERN_ERR "%s: Unable to get resource for MC err regs\n",
986 __func__);
987 goto err;
990 if (!devm_request_mem_region(&op->dev, r.start,
991 r.end - r.start + 1, pdata->name)) {
992 printk(KERN_ERR "%s: Error while requesting mem region\n",
993 __func__);
994 res = -EBUSY;
995 goto err;
998 pdata->mc_vbase = devm_ioremap(&op->dev, r.start, r.end - r.start + 1);
999 if (!pdata->mc_vbase) {
1000 printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
1001 res = -ENOMEM;
1002 goto err;
1005 sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
1006 if (!(sdram_ctl & DSC_ECC_EN)) {
1007 /* no ECC */
1008 printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
1009 res = -ENODEV;
1010 goto err;
1013 debugf3("%s(): init mci\n", __func__);
1014 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
1015 MEM_FLAG_DDR | MEM_FLAG_DDR2;
1016 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
1017 mci->edac_cap = EDAC_FLAG_SECDED;
1018 mci->mod_name = EDAC_MOD_STR;
1019 mci->mod_ver = MPC85XX_REVISION;
1021 if (edac_op_state == EDAC_OPSTATE_POLL)
1022 mci->edac_check = mpc85xx_mc_check;
1024 mci->ctl_page_to_phys = NULL;
1026 mci->scrub_mode = SCRUB_SW_SRC;
1028 mpc85xx_set_mc_sysfs_attributes(mci);
1030 mpc85xx_init_csrows(mci);
1032 /* store the original error disable bits */
1033 orig_ddr_err_disable =
1034 in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
1035 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, 0);
1037 /* clear all error bits */
1038 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);
1040 if (edac_mc_add_mc(mci)) {
1041 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
1042 goto err;
1045 if (edac_op_state == EDAC_OPSTATE_INT) {
1046 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN,
1047 DDR_EIE_MBEE | DDR_EIE_SBEE);
1049 /* store the original error management threshold */
1050 orig_ddr_err_sbe = in_be32(pdata->mc_vbase +
1051 MPC85XX_MC_ERR_SBE) & 0xff0000;
1053 /* set threshold to 1 error per interrupt */
1054 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000);
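/*
 * Note (added for clarity): the SBE threshold field corresponds to the
 * 0xff0000 mask used when saving the original value above, so writing
 * 0x10000 programs a threshold of one single-bit error per interrupt.
 */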
1056 /* register interrupts */
1057 pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
1058 res = devm_request_irq(&op->dev, pdata->irq,
1059 mpc85xx_mc_isr,
1060 IRQF_DISABLED | IRQF_SHARED,
1061 "[EDAC] MC err", mci);
1062 if (res < 0) {
1063 printk(KERN_ERR "%s: Unable to request irq %d for "
1064 "MPC85xx DRAM ERR\n", __func__, pdata->irq);
1065 irq_dispose_mapping(pdata->irq);
1066 res = -ENODEV;
1067 goto err2;
1070 printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC\n",
1071 pdata->irq);
1074 devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
1075 debugf3("%s(): success\n", __func__);
1076 printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");
1078 return 0;
1080 err2:
1081 edac_mc_del_mc(&op->dev);
1082 err:
1083 devres_release_group(&op->dev, mpc85xx_mc_err_probe);
1084 edac_mc_free(mci);
1085 return res;
1088 static int mpc85xx_mc_err_remove(struct platform_device *op)
1090 struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
1091 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
1093 debugf0("%s()\n", __func__);
1095 if (edac_op_state == EDAC_OPSTATE_INT) {
1096 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
1097 irq_dispose_mapping(pdata->irq);
1100 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE,
1101 orig_ddr_err_disable);
1102 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe);
1104 edac_mc_del_mc(&op->dev);
1105 edac_mc_free(mci);
1106 return 0;
1109 static struct of_device_id mpc85xx_mc_err_of_match[] = {
1110 /* deprecate the fsl,85.. forms in the future, 2.6.30? */
1111 { .compatible = "fsl,8540-memory-controller", },
1112 { .compatible = "fsl,8541-memory-controller", },
1113 { .compatible = "fsl,8544-memory-controller", },
1114 { .compatible = "fsl,8548-memory-controller", },
1115 { .compatible = "fsl,8555-memory-controller", },
1116 { .compatible = "fsl,8568-memory-controller", },
1117 { .compatible = "fsl,mpc8536-memory-controller", },
1118 { .compatible = "fsl,mpc8540-memory-controller", },
1119 { .compatible = "fsl,mpc8541-memory-controller", },
1120 { .compatible = "fsl,mpc8544-memory-controller", },
1121 { .compatible = "fsl,mpc8548-memory-controller", },
1122 { .compatible = "fsl,mpc8555-memory-controller", },
1123 { .compatible = "fsl,mpc8560-memory-controller", },
1124 { .compatible = "fsl,mpc8568-memory-controller", },
1125 { .compatible = "fsl,mpc8569-memory-controller", },
1126 { .compatible = "fsl,mpc8572-memory-controller", },
1127 { .compatible = "fsl,mpc8349-memory-controller", },
1128 { .compatible = "fsl,p1020-memory-controller", },
1129 { .compatible = "fsl,p1021-memory-controller", },
1130 { .compatible = "fsl,p2020-memory-controller", },
1131 { .compatible = "fsl,p4080-memory-controller", },
1134 MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
1136 static struct platform_driver mpc85xx_mc_err_driver = {
1137 .probe = mpc85xx_mc_err_probe,
1138 .remove = mpc85xx_mc_err_remove,
1139 .driver = {
1140 .name = "mpc85xx_mc_err",
1141 .owner = THIS_MODULE,
1142 .of_match_table = mpc85xx_mc_err_of_match,
1146 #ifdef CONFIG_FSL_SOC_BOOKE
1147 static void __init mpc85xx_mc_clear_rfxe(void *data)
1149 orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1);
1150 mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~HID1_RFXE));
1152 #endif
1154 static int __init mpc85xx_mc_init(void)
1156 int res = 0;
1157 u32 pvr = 0;
1159 printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
1160 "(C) 2006 Montavista Software\n");
1162 /* make sure error reporting method is sane */
1163 switch (edac_op_state) {
1164 case EDAC_OPSTATE_POLL:
1165 case EDAC_OPSTATE_INT:
1166 break;
1167 default:
1168 edac_op_state = EDAC_OPSTATE_INT;
1169 break;
1172 res = platform_driver_register(&mpc85xx_mc_err_driver);
1173 if (res)
1174 printk(KERN_WARNING EDAC_MOD_STR "MC fails to register\n");
1176 res = platform_driver_register(&mpc85xx_l2_err_driver);
1177 if (res)
1178 printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n");
1180 #ifdef CONFIG_PCI
1181 res = platform_driver_register(&mpc85xx_pci_err_driver);
1182 if (res)
1183 printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n");
1184 #endif
1186 #ifdef CONFIG_FSL_SOC_BOOKE
1187 pvr = mfspr(SPRN_PVR);
1189 if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
1190 (PVR_VER(pvr) == PVR_VER_E500V2)) {
1192 * need to clear HID1[RFXE] to disable machine check int
1193 * so we can catch it
1195 if (edac_op_state == EDAC_OPSTATE_INT)
1196 on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);
1198 #endif
1200 return 0;
1203 module_init(mpc85xx_mc_init);
1205 #ifdef CONFIG_FSL_SOC_BOOKE
1206 static void __exit mpc85xx_mc_restore_hid1(void *data)
1208 mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]);
1210 #endif
1212 static void __exit mpc85xx_mc_exit(void)
1214 #ifdef CONFIG_FSL_SOC_BOOKE
1215 u32 pvr = mfspr(SPRN_PVR);
1217 if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
1218 (PVR_VER(pvr) == PVR_VER_E500V2)) {
1219 on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
1221 #endif
1222 #ifdef CONFIG_PCI
1223 platform_driver_unregister(&mpc85xx_pci_err_driver);
1224 #endif
1225 platform_driver_unregister(&mpc85xx_l2_err_driver);
1226 platform_driver_unregister(&mpc85xx_mc_err_driver);
1229 module_exit(mpc85xx_mc_exit);
1231 MODULE_LICENSE("GPL");
1232 MODULE_AUTHOR("Montavista Software, Inc.");
1233 module_param(edac_op_state, int, 0444);
1234 MODULE_PARM_DESC(edac_op_state,
1235 "EDAC Error Reporting state: 0=Poll, 2=Interrupt");