drivers/edac/ti_edac.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
 *
 * Texas Instruments DDR3 ECC error correction and detection driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/edac.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/module.h>

#include "edac_module.h"

/* EMIF controller registers */
#define EMIF_SDRAM_CONFIG		0x008
#define EMIF_IRQ_STATUS			0x0ac
#define EMIF_IRQ_ENABLE_SET		0x0b4
#define EMIF_ECC_CTRL			0x110
#define EMIF_1B_ECC_ERR_CNT		0x130
#define EMIF_1B_ECC_ERR_THRSH		0x134
#define EMIF_1B_ECC_ERR_ADDR_LOG	0x13c
#define EMIF_2B_ECC_ERR_ADDR_LOG	0x140

/* Bit definitions for EMIF_SDRAM_CONFIG */
#define SDRAM_TYPE_SHIFT		29
#define SDRAM_TYPE_MASK			GENMASK(31, 29)
#define SDRAM_TYPE_DDR3			(3 << SDRAM_TYPE_SHIFT)
#define SDRAM_TYPE_DDR2			(2 << SDRAM_TYPE_SHIFT)
#define SDRAM_NARROW_MODE_MASK		GENMASK(15, 14)
#define SDRAM_K2_NARROW_MODE_SHIFT	12
#define SDRAM_K2_NARROW_MODE_MASK	GENMASK(13, 12)
#define SDRAM_ROWSIZE_SHIFT		7
#define SDRAM_ROWSIZE_MASK		GENMASK(9, 7)
#define SDRAM_IBANK_SHIFT		4
#define SDRAM_IBANK_MASK		GENMASK(6, 4)
#define SDRAM_K2_IBANK_SHIFT		5
#define SDRAM_K2_IBANK_MASK		GENMASK(6, 5)
#define SDRAM_K2_EBANK_SHIFT		3
#define SDRAM_K2_EBANK_MASK		BIT(SDRAM_K2_EBANK_SHIFT)
#define SDRAM_PAGESIZE_SHIFT		0
#define SDRAM_PAGESIZE_MASK		GENMASK(2, 0)
#define SDRAM_K2_PAGESIZE_SHIFT		0
#define SDRAM_K2_PAGESIZE_MASK		GENMASK(1, 0)

#define EMIF_1B_ECC_ERR_THRSH_SHIFT	24

/* IRQ bit definitions */
#define EMIF_1B_ECC_ERR			BIT(5)
#define EMIF_2B_ECC_ERR			BIT(4)
#define EMIF_WR_ECC_ERR			BIT(3)
#define EMIF_SYS_ERR			BIT(0)
/* Bit 31 enables ECC and 28 enables RMW */
#define ECC_ENABLED			(BIT(31) | BIT(28))

#define EDAC_MOD_NAME			"ti-emif-edac"
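
/* EMIF controller flavours supported by this driver, selected via OF match data */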
enum {
	EMIF_TYPE_DRA7,
	EMIF_TYPE_K2
};

struct ti_edac {
	void __iomem *reg;
};
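
/* Relaxed MMIO accessors for the EMIF register space mapped at edac->reg */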
static u32 ti_edac_readl(struct ti_edac *edac, u16 offset)
{
	return readl_relaxed(edac->reg + offset);
}

static void ti_edac_writel(struct ti_edac *edac, u32 val, u16 offset)
{
	writel_relaxed(val, edac->reg + offset);
}
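
/*
 * ECC interrupt handler: report correctable (1-bit), uncorrectable (2-bit)
 * and write ECC errors to the EDAC core, then acknowledge the handled bits
 * by writing them back to EMIF_IRQ_STATUS.
 */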
static irqreturn_t ti_edac_isr(int irq, void *data)
{
	struct mem_ctl_info *mci = data;
	struct ti_edac *edac = mci->pvt_info;
	u32 irq_status;
	u32 err_addr;
	int err_count;

	irq_status = ti_edac_readl(edac, EMIF_IRQ_STATUS);

	if (irq_status & EMIF_1B_ECC_ERR) {
		err_addr = ti_edac_readl(edac, EMIF_1B_ECC_ERR_ADDR_LOG);
		err_count = ti_edac_readl(edac, EMIF_1B_ECC_ERR_CNT);
		ti_edac_writel(edac, err_count, EMIF_1B_ECC_ERR_CNT);
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
				     err_addr >> PAGE_SHIFT,
				     err_addr & ~PAGE_MASK, -1, 0, 0, 0,
				     mci->ctl_name, "1B");
	}

	if (irq_status & EMIF_2B_ECC_ERR) {
		err_addr = ti_edac_readl(edac, EMIF_2B_ECC_ERR_ADDR_LOG);
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     err_addr >> PAGE_SHIFT,
				     err_addr & ~PAGE_MASK, -1, 0, 0, 0,
				     mci->ctl_name, "2B");
	}

	if (irq_status & EMIF_WR_ECC_ERR)
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     0, 0, -1, 0, 0, 0,
				     mci->ctl_name, "WR");

	ti_edac_writel(edac, irq_status, EMIF_IRQ_STATUS);

	return IRQ_HANDLED;
}
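
/*
 * Derive the DIMM geometry (device width, size, memory type and ECC mode)
 * from EMIF_SDRAM_CONFIG and EMIF_ECC_CTRL. DRA7 and Keystone 2 encode the
 * page/row/bank fields differently, hence the two decode paths.
 */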
static void ti_edac_setup_dimm(struct mem_ctl_info *mci, u32 type)
{
	struct dimm_info *dimm;
	struct ti_edac *edac = mci->pvt_info;
	int bits;
	u32 val;
	u32 memsize;

	dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, 0, 0, 0);

	val = ti_edac_readl(edac, EMIF_SDRAM_CONFIG);

	if (type == EMIF_TYPE_DRA7) {
		bits = ((val & SDRAM_PAGESIZE_MASK) >> SDRAM_PAGESIZE_SHIFT) + 8;
		bits += ((val & SDRAM_ROWSIZE_MASK) >> SDRAM_ROWSIZE_SHIFT) + 9;
		bits += (val & SDRAM_IBANK_MASK) >> SDRAM_IBANK_SHIFT;

		if (val & SDRAM_NARROW_MODE_MASK) {
			bits++;
			dimm->dtype = DEV_X16;
		} else {
			bits += 2;
			dimm->dtype = DEV_X32;
		}
	} else {
		bits = 16;
		bits += ((val & SDRAM_K2_PAGESIZE_MASK) >>
			 SDRAM_K2_PAGESIZE_SHIFT) + 8;
		bits += (val & SDRAM_K2_IBANK_MASK) >> SDRAM_K2_IBANK_SHIFT;
		bits += (val & SDRAM_K2_EBANK_MASK) >> SDRAM_K2_EBANK_SHIFT;

		val = (val & SDRAM_K2_NARROW_MODE_MASK) >>
		      SDRAM_K2_NARROW_MODE_SHIFT;
		switch (val) {
		case 0:
			bits += 3;
			dimm->dtype = DEV_X64;
			break;
		case 1:
			bits += 2;
			dimm->dtype = DEV_X32;
			break;
		case 2:
			bits++;
			dimm->dtype = DEV_X16;
			break;
		}
	}

	memsize = 1 << bits;

	dimm->nr_pages = memsize >> PAGE_SHIFT;
	dimm->grain = 4;
	if ((val & SDRAM_TYPE_MASK) == SDRAM_TYPE_DDR2)
		dimm->mtype = MEM_DDR2;
	else
		dimm->mtype = MEM_DDR3;

	val = ti_edac_readl(edac, EMIF_ECC_CTRL);
	if (val & ECC_ENABLED)
		dimm->edac_mode = EDAC_SECDED;
	else
		dimm->edac_mode = EDAC_NONE;
}

static const struct of_device_id ti_edac_of_match[] = {
	{ .compatible = "ti,emif-keystone", .data = (void *)EMIF_TYPE_K2 },
	{ .compatible = "ti,emif-dra7xx", .data = (void *)EMIF_TYPE_DRA7 },
	{},
};
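
/*
 * Number this EMIF instance by comparing its translated base address with
 * the other matching EMIF nodes in the device tree: controllers at lower
 * addresses get lower IDs.
 */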
static int _emif_get_id(struct device_node *node)
{
	struct device_node *np;
	const __be32 *addrp;
	u32 addr, my_addr;
	int my_id = 0;

	addrp = of_get_address(node, 0, NULL, NULL);
	my_addr = (u32)of_translate_address(node, addrp);

	for_each_matching_node(np, ti_edac_of_match) {
		if (np == node)
			continue;

		addrp = of_get_address(np, 0, NULL, NULL);
		addr = (u32)of_translate_address(np, addrp);

		edac_printk(KERN_INFO, EDAC_MOD_NAME,
			    "addr=%x, my_addr=%x\n",
			    addr, my_addr);

		if (addr < my_addr)
			my_id++;
	}

	return my_id;
}
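
/*
 * Probe: map the EMIF registers, allocate and register a single-DIMM MCI,
 * hook up the ECC error interrupt and enable the 1-bit, 2-bit and write
 * ECC error IRQs.
 */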
static int ti_edac_probe(struct platform_device *pdev)
{
	int error_irq = 0, ret = -ENODEV;
	struct device *dev = &pdev->dev;
	struct resource *res;
	void __iomem *reg;
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[1];
	const struct of_device_id *id;
	struct ti_edac *edac;
	int emif_id;

	id = of_match_device(ti_edac_of_match, &pdev->dev);
	if (!id)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	reg = devm_ioremap_resource(dev, res);
	if (IS_ERR(reg)) {
		edac_printk(KERN_ERR, EDAC_MOD_NAME,
			    "EMIF controller regs not defined\n");
		return PTR_ERR(reg);
	}

	layers[0].type = EDAC_MC_LAYER_ALL_MEM;
	layers[0].size = 1;

	/* Allocate ID number for our EMIF controller */
	emif_id = _emif_get_id(pdev->dev.of_node);
	if (emif_id < 0)
		return -EINVAL;

	mci = edac_mc_alloc(emif_id, 1, layers, sizeof(*edac));
	if (!mci)
		return -ENOMEM;

	mci->pdev = &pdev->dev;
	edac = mci->pvt_info;
	edac->reg = reg;
	platform_set_drvdata(pdev, mci);

	mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_SECDED | EDAC_FLAG_NONE;
	mci->mod_name = EDAC_MOD_NAME;
	mci->ctl_name = id->compatible;
	mci->dev_name = dev_name(&pdev->dev);

	/* Setup memory layout */
	ti_edac_setup_dimm(mci, (u32)(id->data));

	/* add EMIF ECC error handler */
	error_irq = platform_get_irq(pdev, 0);
	if (error_irq < 0) {
		ret = error_irq;
		edac_printk(KERN_ERR, EDAC_MOD_NAME,
			    "EMIF irq number not defined.\n");
		goto err;
	}

	ret = devm_request_irq(dev, error_irq, ti_edac_isr, 0,
			       "emif-edac-irq", mci);
	if (ret) {
		edac_printk(KERN_ERR, EDAC_MOD_NAME,
			    "request_irq fail for EMIF EDAC irq\n");
		goto err;
	}

	ret = edac_mc_add_mc(mci);
	if (ret) {
		edac_printk(KERN_ERR, EDAC_MOD_NAME,
			    "Failed to register mci: %d.\n", ret);
		goto err;
	}

	/* Generate an interrupt with each 1b error */
	ti_edac_writel(edac, 1 << EMIF_1B_ECC_ERR_THRSH_SHIFT,
		       EMIF_1B_ECC_ERR_THRSH);

	/* Enable interrupts */
	ti_edac_writel(edac,
		       EMIF_1B_ECC_ERR | EMIF_2B_ECC_ERR | EMIF_WR_ECC_ERR,
		       EMIF_IRQ_ENABLE_SET);

	return 0;

err:
	edac_mc_free(mci);
	return ret;
}

static int ti_edac_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);

	return 0;
}

static struct platform_driver ti_edac_driver = {
	.probe = ti_edac_probe,
	.remove = ti_edac_remove,
	.driver = {
		.name = EDAC_MOD_NAME,
		.of_match_table = ti_edac_of_match,
	},
};

module_platform_driver(ti_edac_driver);

MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("EDAC Driver for Texas Instruments DDR3 MC");
MODULE_LICENSE("GPL v2");