// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018, 2019 Cisco Systems
 */

#include <linux/edac.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/stop_machine.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
#include "edac_module.h"

#define DRV_NAME "aspeed-edac"

#define ASPEED_MCR_PROT        0x00 /* protection key register */
#define ASPEED_MCR_CONF        0x04 /* configuration register */
#define ASPEED_MCR_INTR_CTRL   0x50 /* interrupt control/status register */
#define ASPEED_MCR_ADDR_UNREC  0x58 /* address of first un-recoverable error */
#define ASPEED_MCR_ADDR_REC    0x5c /* address of last recoverable error */
#define ASPEED_MCR_LAST        ASPEED_MCR_ADDR_REC

#define ASPEED_MCR_PROT_PASSWD          0xfc600309
#define ASPEED_MCR_CONF_DRAM_TYPE       BIT(4)
#define ASPEED_MCR_CONF_ECC             BIT(7)
#define ASPEED_MCR_INTR_CTRL_CLEAR      BIT(31)
#define ASPEED_MCR_INTR_CTRL_CNT_REC    GENMASK(23, 16)
#define ASPEED_MCR_INTR_CTRL_CNT_UNREC  GENMASK(15, 12)
#define ASPEED_MCR_INTR_CTRL_ENABLE     (BIT(0) | BIT(1))

static struct regmap *aspeed_regmap;

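/*
 * All writes to the memory controller register set must be unlocked first:
 * the accessor below writes the protection password to ASPEED_MCR_PROT,
 * performs the register write, and then re-locks the register set.
 */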
static int regmap_reg_write(void *context, unsigned int reg, unsigned int val)
{
	void __iomem *regs = (void __iomem *)context;

	/* enable write to MCR register set */
	writel(ASPEED_MCR_PROT_PASSWD, regs + ASPEED_MCR_PROT);

	writel(val, regs + reg);

	/* disable write to MCR register set */
	writel(~ASPEED_MCR_PROT_PASSWD, regs + ASPEED_MCR_PROT);

	return 0;
}

static int regmap_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	void __iomem *regs = (void __iomem *)context;

	*val = readl(regs + reg);

	return 0;
}

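/*
 * The interrupt control/status register and the two latched error address
 * registers are updated by hardware, so mark them volatile and never serve
 * them from a regmap cache.
 */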
static bool regmap_is_volatile(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case ASPEED_MCR_INTR_CTRL:
	case ASPEED_MCR_ADDR_UNREC:
	case ASPEED_MCR_ADDR_REC:
		return true;
	default:
		return false;
	}
}

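/*
 * Route all register accesses through the MMIO helpers above instead of a
 * regmap bus, so every write goes through the unlock/lock sequence and the
 * hardware-updated registers are treated as volatile.
 */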
static const struct regmap_config aspeed_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = ASPEED_MCR_LAST,
	.reg_write = regmap_reg_write,
	.reg_read = regmap_reg_read,
	.volatile_reg = regmap_is_volatile,
};

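/*
 * Report recoverable (corrected) errors to the EDAC core. The hardware only
 * latches the address of the last recoverable error, so all but the last
 * error in this batch are reported without address information.
 */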
static void count_rec(struct mem_ctl_info *mci, u8 rec_cnt, u32 rec_addr)
{
	struct csrow_info *csrow = mci->csrows[0];
	u32 page, offset, syndrome;

	if (!rec_cnt)
		return;

	/* report first few errors (if there are) */
	/* note: no addresses are recorded */
	if (rec_cnt > 1) {
		/* page, offset and syndrome are not available */
		page = 0;
		offset = 0;
		syndrome = 0;
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, rec_cnt - 1,
				     page, offset, syndrome, 0, 0, -1,
				     "address(es) not available", "");
	}

	/* report last error */
	/* note: rec_addr is the last recoverable error addr */
	page = rec_addr >> PAGE_SHIFT;
	offset = rec_addr & ~PAGE_MASK;
	/* syndrome is not available */
	syndrome = 0;
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
			     csrow->first_page + page, offset, syndrome,
			     0, 0, -1, "", "");
}

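/*
 * Report unrecoverable (uncorrected) errors to the EDAC core. Here the
 * hardware latches the address of the first unrecoverable error; any further
 * errors in the batch are reported without address information.
 */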
static void count_un_rec(struct mem_ctl_info *mci, u8 un_rec_cnt,
			 u32 un_rec_addr)
{
	struct csrow_info *csrow = mci->csrows[0];
	u32 page, offset, syndrome;

	if (!un_rec_cnt)
		return;

	/* report first error */
	/* note: un_rec_addr is the first unrecoverable error addr */
	page = un_rec_addr >> PAGE_SHIFT;
	offset = un_rec_addr & ~PAGE_MASK;
	/* syndrome is not available */
	syndrome = 0;
	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
			     csrow->first_page + page, offset, syndrome,
			     0, 0, -1, "", "");

	/* report further errors (if there are) */
	/* note: no addresses are recorded */
	if (un_rec_cnt > 1) {
		/* page, offset and syndrome are not available */
		page = 0;
		offset = 0;
		syndrome = 0;
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
				     un_rec_cnt - 1,
				     page, offset, syndrome, 0, 0, -1,
				     "address(es) not available", "");
	}
}

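/*
 * Interrupt handler for the memory controller: read the error counters and
 * latched error addresses, pulse the clear bit to reset counters and
 * interrupt flags, then forward the counts to the EDAC core.
 */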
static irqreturn_t mcr_isr(int irq, void *arg)
{
	struct mem_ctl_info *mci = arg;
	u32 rec_addr, un_rec_addr;
	u32 reg50, reg5c, reg58;
	u8 rec_cnt, un_rec_cnt;

	regmap_read(aspeed_regmap, ASPEED_MCR_INTR_CTRL, &reg50);
	dev_dbg(mci->pdev, "received edac interrupt w/ mcr register 50: 0x%x\n",
		reg50);

	/* collect data about recoverable and unrecoverable errors */
	rec_cnt = (reg50 & ASPEED_MCR_INTR_CTRL_CNT_REC) >> 16;
	un_rec_cnt = (reg50 & ASPEED_MCR_INTR_CTRL_CNT_UNREC) >> 12;

	dev_dbg(mci->pdev, "%d recoverable interrupts and %d unrecoverable interrupts\n",
		rec_cnt, un_rec_cnt);

	regmap_read(aspeed_regmap, ASPEED_MCR_ADDR_UNREC, &reg58);
	un_rec_addr = reg58;

	regmap_read(aspeed_regmap, ASPEED_MCR_ADDR_REC, &reg5c);
	rec_addr = reg5c;

	/* clear interrupt flags and error counters: */
	regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL,
			   ASPEED_MCR_INTR_CTRL_CLEAR,
			   ASPEED_MCR_INTR_CTRL_CLEAR);

	regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL,
			   ASPEED_MCR_INTR_CTRL_CLEAR, 0);

	/* process recoverable and unrecoverable errors */
	count_rec(mci, rec_cnt, rec_addr);
	count_un_rec(mci, un_rec_cnt, un_rec_addr);

	if (!rec_cnt && !un_rec_cnt)
		dev_dbg(mci->pdev, "received edac interrupt, but did not find any ECC counters\n");

	regmap_read(aspeed_regmap, ASPEED_MCR_INTR_CTRL, &reg50);
	dev_dbg(mci->pdev, "edac interrupt handled. mcr reg 50 is now: 0x%x\n",
		reg50);

	return IRQ_HANDLED;
}

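/*
 * Request the memory controller interrupt and enable error reporting in the
 * interrupt control register.
 */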
static int config_irq(void *ctx, struct platform_device *pdev)
{
	int irq;
	int rc;

	/* register interrupt handler */
	irq = platform_get_irq(pdev, 0);
	dev_dbg(&pdev->dev, "got irq %d\n", irq);
	if (irq < 0)
		return irq;

	rc = devm_request_irq(&pdev->dev, irq, mcr_isr, IRQF_TRIGGER_HIGH,
			      DRV_NAME, ctx);
	if (rc) {
		dev_err(&pdev->dev, "unable to request irq %d\n", irq);
		return rc;
	}

	/* enable interrupts */
	regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL,
			   ASPEED_MCR_INTR_CTRL_ENABLE,
			   ASPEED_MCR_INTR_CTRL_ENABLE);

	return 0;
}

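/*
 * The controller covers one contiguous DRAM region, so a single csrow with a
 * single channel is initialized from the /memory node in the device tree.
 * The DRAM type (DDR3 vs. DDR4) is read back from the configuration register.
 */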
static int init_csrows(struct mem_ctl_info *mci)
{
	struct csrow_info *csrow = mci->csrows[0];
	u32 nr_pages, dram_type;
	struct dimm_info *dimm;
	struct device_node *np;
	struct resource r;
	u32 reg04;
	int rc;

	/* retrieve info about physical memory from device tree */
	np = of_find_node_by_name(NULL, "memory");
	if (!np) {
		dev_err(mci->pdev, "dt: missing /memory node\n");
		return -ENODEV;
	}

	rc = of_address_to_resource(np, 0, &r);

	of_node_put(np);

	if (rc) {
		dev_err(mci->pdev, "dt: failed requesting resource for /memory node\n");
		return rc;
	}

	dev_dbg(mci->pdev, "dt: /memory node resources: first page r.start=0x%x, resource_size=0x%x, PAGE_SHIFT macro=0x%x\n",
		r.start, resource_size(&r), PAGE_SHIFT);

	csrow->first_page = r.start >> PAGE_SHIFT;
	nr_pages = resource_size(&r) >> PAGE_SHIFT;
	csrow->last_page = csrow->first_page + nr_pages - 1;

	regmap_read(aspeed_regmap, ASPEED_MCR_CONF, &reg04);
	dram_type = (reg04 & ASPEED_MCR_CONF_DRAM_TYPE) ? MEM_DDR4 : MEM_DDR3;

	dimm = csrow->channels[0]->dimm;
	dimm->mtype = dram_type;
	dimm->edac_mode = EDAC_SECDED;
	dimm->nr_pages = nr_pages / csrow->nr_channels;

	dev_dbg(mci->pdev, "initialized dimm with first_page=0x%lx and nr_pages=0x%x\n",
		csrow->first_page, nr_pages);

	return 0;
}

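/*
 * Probe: map the controller registers, verify that ECC was enabled by the
 * firmware, allocate and fill the EDAC mem_ctl_info, register it with the
 * EDAC core and finally hook up the error interrupt.
 */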
static int aspeed_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	void __iomem *regs;
	u32 reg04;
	int rc;

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	aspeed_regmap = devm_regmap_init(dev, NULL, (__force void *)regs,
					 &aspeed_regmap_config);
	if (IS_ERR(aspeed_regmap))
		return PTR_ERR(aspeed_regmap);

	/* bail out if ECC mode is not configured */
	regmap_read(aspeed_regmap, ASPEED_MCR_CONF, &reg04);
	if (!(reg04 & ASPEED_MCR_CONF_ECC)) {
		dev_err(&pdev->dev, "ECC mode is not configured in u-boot\n");
		return -EPERM;
	}

	edac_op_state = EDAC_OPSTATE_INT;

	/* allocate & init EDAC MC data structure */
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 1;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 1;
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		return -ENOMEM;

	mci->pdev = &pdev->dev;
	mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR4;
	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->scrub_cap = SCRUB_FLAG_HW_SRC;
	mci->scrub_mode = SCRUB_HW_SRC;
	mci->mod_name = DRV_NAME;
	mci->ctl_name = "MIC";
	mci->dev_name = dev_name(&pdev->dev);

	rc = init_csrows(mci);
	if (rc) {
		dev_err(&pdev->dev, "failed to init csrows\n");
		goto probe_exit02;
	}

	platform_set_drvdata(pdev, mci);

	/* register with edac core */
	rc = edac_mc_add_mc(mci);
	if (rc) {
		dev_err(&pdev->dev, "failed to register with EDAC core\n");
		goto probe_exit02;
	}

	/* register interrupt handler and enable interrupts */
	rc = config_irq(mci, pdev);
	if (rc) {
		dev_err(&pdev->dev, "failed setting up irq\n");
		goto probe_exit01;
	}

	return 0;

probe_exit01:
	edac_mc_del_mc(&pdev->dev);
probe_exit02:
	edac_mc_free(mci);
	return rc;
}

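/*
 * Remove: quiesce the controller interrupt, then unregister and free the
 * EDAC mem_ctl_info.
 */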
static int aspeed_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci;

	/* disable interrupts */
	regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL,
			   ASPEED_MCR_INTR_CTRL_ENABLE, 0);

	mci = edac_mc_del_mc(&pdev->dev);
	if (mci)
		edac_mc_free(mci);

	return 0;
}

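/*
 * Matching devicetree node (illustrative only; the unit address, register
 * size and interrupt specifier below are placeholders, not taken from an
 * actual SoC dtsi):
 *
 *	edac: sdram@1e6e0000 {
 *		compatible = "aspeed,ast2500-sdram-edac";
 *		reg = <0x1e6e0000 0x174>;
 *		interrupts = <0>;
 *	};
 */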
static const struct of_device_id aspeed_of_match[] = {
	{ .compatible = "aspeed,ast2400-sdram-edac" },
	{ .compatible = "aspeed,ast2500-sdram-edac" },
	{ .compatible = "aspeed,ast2600-sdram-edac" },
	{},
};

MODULE_DEVICE_TABLE(of, aspeed_of_match);

static struct platform_driver aspeed_driver = {
	.driver		= {
		.name		= DRV_NAME,
		.of_match_table = aspeed_of_match
	},
	.probe		= aspeed_probe,
	.remove		= aspeed_remove
};
module_platform_driver(aspeed_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefan Schaeckeler <sschaeck@cisco.com>");
MODULE_DESCRIPTION("Aspeed BMC SoC EDAC driver");
MODULE_VERSION("1.0");