Linux 4.9.243
drivers/mtd/nand/mtk_ecc.c

/*
 * MTK ECC controller driver.
 * Copyright (C) 2016 MediaTek Inc.
 * Authors:	Xiaolei Li		<xiaolei.li@mediatek.com>
 *		Jorge Ramirez-Ortiz	<jorge.ramirez-ortiz@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mutex.h>

#include "mtk_ecc.h"

#define ECC_IDLE_MASK		BIT(0)
#define ECC_IRQ_EN		BIT(0)
#define ECC_OP_ENABLE		(1)
#define ECC_OP_DISABLE		(0)

#define ECC_ENCCON		(0x00)
#define ECC_ENCCNFG		(0x04)
#define		ECC_CNFG_4BIT		(0)
#define		ECC_CNFG_6BIT		(1)
#define		ECC_CNFG_8BIT		(2)
#define		ECC_CNFG_10BIT		(3)
#define		ECC_CNFG_12BIT		(4)
#define		ECC_CNFG_14BIT		(5)
#define		ECC_CNFG_16BIT		(6)
#define		ECC_CNFG_18BIT		(7)
#define		ECC_CNFG_20BIT		(8)
#define		ECC_CNFG_22BIT		(9)
#define		ECC_CNFG_24BIT		(0xa)
#define		ECC_CNFG_28BIT		(0xb)
#define		ECC_CNFG_32BIT		(0xc)
#define		ECC_CNFG_36BIT		(0xd)
#define		ECC_CNFG_40BIT		(0xe)
#define		ECC_CNFG_44BIT		(0xf)
#define		ECC_CNFG_48BIT		(0x10)
#define		ECC_CNFG_52BIT		(0x11)
#define		ECC_CNFG_56BIT		(0x12)
#define		ECC_CNFG_60BIT		(0x13)
#define		ECC_MODE_SHIFT		(5)
#define		ECC_MS_SHIFT		(16)
#define ECC_ENCDIADDR		(0x08)
#define ECC_ENCIDLE		(0x0C)
#define ECC_ENCPAR(x)		(0x10 + (x) * sizeof(u32))
#define ECC_ENCIRQ_EN		(0x80)
#define ECC_ENCIRQ_STA		(0x84)
#define ECC_DECCON		(0x100)
#define ECC_DECCNFG		(0x104)
#define		DEC_EMPTY_EN		BIT(31)
#define		DEC_CNFG_CORRECT	(0x3 << 12)
#define ECC_DECIDLE		(0x10C)
#define ECC_DECENUM0		(0x114)
#define ERR_MASK		(0x3f)
#define ECC_DECDONE		(0x124)
#define ECC_DECIRQ_EN		(0x200)
#define ECC_DECIRQ_STA		(0x204)

#define ECC_TIMEOUT		(500000)

#define ECC_IDLE_REG(op)	((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
#define ECC_CTL_REG(op)		((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
#define ECC_IRQ_REG(op)		((op) == ECC_ENCODE ? \
				 ECC_ENCIRQ_EN : ECC_DECIRQ_EN)
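
/*
 * Per-controller state: MMIO base, engine clock, a completion signalled from
 * the interrupt handler, a mutex serializing users of the engine, the done
 * bit of the last sector expected from the current decode, and a small
 * aligned bounce buffer for reading back encoder parity bytes.
 */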
struct mtk_ecc {
	struct device *dev;
	void __iomem *regs;
	struct clk *clk;

	struct completion done;
	struct mutex lock;
	u32 sectors;

	u8 eccdata[112];
};
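
/*
 * Poll the encoder or decoder idle bit (selected by @op) until the engine
 * reports idle or ECC_TIMEOUT microseconds have elapsed; a timeout is only
 * reported as a warning.
 */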
static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
				     enum mtk_ecc_operation op)
{
	struct device *dev = ecc->dev;
	u32 val;
	int ret;

	ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val,
					val & ECC_IDLE_MASK,
					10, ECC_TIMEOUT);
	if (ret)
		dev_warn(dev, "%s NOT idle\n",
			 op == ECC_ENCODE ? "encoder" : "decoder");
}
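
/*
 * Interrupt handler shared by the encoder and the decoder. The decoder status
 * is checked first; its completion is only signalled once ECC_DECDONE shows
 * the last expected sector (the bit stored in ecc->sectors) as finished.
 */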
static irqreturn_t mtk_ecc_irq(int irq, void *id)
{
	struct mtk_ecc *ecc = id;
	enum mtk_ecc_operation op;
	u32 dec, enc;

	dec = readw(ecc->regs + ECC_DECIRQ_STA) & ECC_IRQ_EN;
	if (dec) {
		op = ECC_DECODE;
		dec = readw(ecc->regs + ECC_DECDONE);
		if (dec & ecc->sectors) {
			/*
			 * Clear decode IRQ status once again to ensure that
			 * there will be no extra IRQ.
			 */
			readw(ecc->regs + ECC_DECIRQ_STA);
			ecc->sectors = 0;
			complete(&ecc->done);
		} else {
			return IRQ_HANDLED;
		}
	} else {
		enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ECC_IRQ_EN;
		if (enc) {
			op = ECC_ENCODE;
			complete(&ecc->done);
		} else {
			return IRQ_NONE;
		}
	}

	return IRQ_HANDLED;
}
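
/*
 * Translate a mtk_ecc_config into the ENCCNFG or DECCNFG register layout:
 * correction strength, operation mode and message size in bits. On the decode
 * side the parity bits are counted as part of the message, and error
 * correction plus empty-page detection are enabled.
 */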
static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
{
	u32 ecc_bit = ECC_CNFG_4BIT, dec_sz, enc_sz;
	u32 reg;

	switch (config->strength) {
	case 4:
		ecc_bit = ECC_CNFG_4BIT;
		break;
	case 6:
		ecc_bit = ECC_CNFG_6BIT;
		break;
	case 8:
		ecc_bit = ECC_CNFG_8BIT;
		break;
	case 10:
		ecc_bit = ECC_CNFG_10BIT;
		break;
	case 12:
		ecc_bit = ECC_CNFG_12BIT;
		break;
	case 14:
		ecc_bit = ECC_CNFG_14BIT;
		break;
	case 16:
		ecc_bit = ECC_CNFG_16BIT;
		break;
	case 18:
		ecc_bit = ECC_CNFG_18BIT;
		break;
	case 20:
		ecc_bit = ECC_CNFG_20BIT;
		break;
	case 22:
		ecc_bit = ECC_CNFG_22BIT;
		break;
	case 24:
		ecc_bit = ECC_CNFG_24BIT;
		break;
	case 28:
		ecc_bit = ECC_CNFG_28BIT;
		break;
	case 32:
		ecc_bit = ECC_CNFG_32BIT;
		break;
	case 36:
		ecc_bit = ECC_CNFG_36BIT;
		break;
	case 40:
		ecc_bit = ECC_CNFG_40BIT;
		break;
	case 44:
		ecc_bit = ECC_CNFG_44BIT;
		break;
	case 48:
		ecc_bit = ECC_CNFG_48BIT;
		break;
	case 52:
		ecc_bit = ECC_CNFG_52BIT;
		break;
	case 56:
		ecc_bit = ECC_CNFG_56BIT;
		break;
	case 60:
		ecc_bit = ECC_CNFG_60BIT;
		break;
	default:
		dev_err(ecc->dev, "invalid strength %d, default to 4 bits\n",
			config->strength);
	}

	if (config->op == ECC_ENCODE) {
		/* configure ECC encoder (in bits) */
		enc_sz = config->len << 3;

		reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
		reg |= (enc_sz << ECC_MS_SHIFT);
		writel(reg, ecc->regs + ECC_ENCCNFG);

		if (config->mode != ECC_NFI_MODE)
			writel(lower_32_bits(config->addr),
			       ecc->regs + ECC_ENCDIADDR);

	} else {
		/* configure ECC decoder (in bits) */
		dec_sz = (config->len << 3) +
			 config->strength * ECC_PARITY_BITS;

		reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
		reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
		reg |= DEC_EMPTY_EN;
		writel(reg, ecc->regs + ECC_DECCNFG);

		if (config->sectors)
			ecc->sectors = 1 << (config->sectors - 1);
	}
}
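
/*
 * ECC_DECENUM0 and the following registers pack one error count per sector,
 * eight bits per sector and four sectors per 32-bit register; a count equal
 * to ERR_MASK marks the sector as uncorrectable.
 */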
void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
		       int sectors)
{
	u32 offset, i, err;
	u32 bitflips = 0;

	stats->corrected = 0;
	stats->failed = 0;

	for (i = 0; i < sectors; i++) {
		offset = (i >> 2) << 2;
		err = readl(ecc->regs + ECC_DECENUM0 + offset);
		err = err >> ((i % 4) * 8);
		err &= ERR_MASK;
		if (err == ERR_MASK) {
			/* uncorrectable errors */
			stats->failed++;
			continue;
		}

		stats->corrected += err;
		bitflips = max_t(u32, bitflips, err);
	}

	stats->bitflips = bitflips;
}
EXPORT_SYMBOL(mtk_ecc_get_stats);

void mtk_ecc_release(struct mtk_ecc *ecc)
{
	clk_disable_unprepare(ecc->clk);
	put_device(ecc->dev);
}
EXPORT_SYMBOL(mtk_ecc_release);

static void mtk_ecc_hw_init(struct mtk_ecc *ecc)
{
	mtk_ecc_wait_idle(ecc, ECC_ENCODE);
	writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON);

	mtk_ecc_wait_idle(ecc, ECC_DECODE);
	writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON);
}
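
/*
 * Resolve the ECC controller behind a device node, take a reference on its
 * device and bring both engines into a known (disabled) state. Returns
 * -EPROBE_DEFER while the controller has not been probed yet.
 */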
static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
{
	struct platform_device *pdev;
	struct mtk_ecc *ecc;

	pdev = of_find_device_by_node(np);
	if (!pdev || !platform_get_drvdata(pdev))
		return ERR_PTR(-EPROBE_DEFER);

	get_device(&pdev->dev);
	ecc = platform_get_drvdata(pdev);
	clk_prepare_enable(ecc->clk);
	mtk_ecc_hw_init(ecc);

	return ecc;
}

struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node)
{
	struct mtk_ecc *ecc = NULL;
	struct device_node *np;

	np = of_parse_phandle(of_node, "ecc-engine", 0);
	if (np) {
		ecc = mtk_ecc_get(np);
		of_node_put(np);
	}

	return ecc;
}
EXPORT_SYMBOL(of_mtk_ecc_get);
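
/*
 * Exported API used by the NAND controller driver. A typical sequence
 * (illustrative sketch only, not lifted from a particular caller):
 *
 *	ecc = of_mtk_ecc_get(nand_np);            resolve the "ecc-engine" phandle
 *	mtk_ecc_enable(ecc, &config);             config.op selects encode/decode
 *	mtk_ecc_wait_done(ecc, config.op);        wait for the completion interrupt
 *	mtk_ecc_get_stats(ecc, &stats, sectors);  decode path only
 *	mtk_ecc_disable(ecc);
 *	...
 *	mtk_ecc_release(ecc);                     drop the reference when done
 */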
int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
{
	enum mtk_ecc_operation op = config->op;
	int ret;

	ret = mutex_lock_interruptible(&ecc->lock);
	if (ret) {
		dev_err(ecc->dev, "interrupted when attempting to lock\n");
		return ret;
	}

	mtk_ecc_wait_idle(ecc, op);
	mtk_ecc_config(ecc, config);
	writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));

	init_completion(&ecc->done);
	writew(ECC_IRQ_EN, ecc->regs + ECC_IRQ_REG(op));

	return 0;
}
EXPORT_SYMBOL(mtk_ecc_enable);

void mtk_ecc_disable(struct mtk_ecc *ecc)
{
	enum mtk_ecc_operation op = ECC_ENCODE;

	/* find out the running operation */
	if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE)
		op = ECC_DECODE;

	/* disable it */
	mtk_ecc_wait_idle(ecc, op);
	if (op == ECC_DECODE)
		/*
		 * Clear decode IRQ status in case there is a timeout to wait
		 * decode IRQ.
		 */
		readw(ecc->regs + ECC_DECIRQ_STA);
	writew(0, ecc->regs + ECC_IRQ_REG(op));
	writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));

	mutex_unlock(&ecc->lock);
}
EXPORT_SYMBOL(mtk_ecc_disable);

int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op)
{
	int ret;

	ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ecc->dev, "%s timeout - interrupt did not arrive\n",
			(op == ECC_ENCODE) ? "encoder" : "decoder");
		return -ETIMEDOUT;
	}

	return 0;
}
EXPORT_SYMBOL(mtk_ecc_wait_done);
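
/*
 * Run the encoder over @bytes of @data (DMA-mapped for the engine) and append
 * the generated parity bytes right after the data. The parity is first read
 * into the aligned eccdata bounce buffer and only then copied, because the
 * OOB destination may be unaligned.
 */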
int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
		   u8 *data, u32 bytes)
{
	dma_addr_t addr;
	u32 len;
	int ret;

	addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
	ret = dma_mapping_error(ecc->dev, addr);
	if (ret) {
		dev_err(ecc->dev, "dma mapping error\n");
		return -EINVAL;
	}

	config->op = ECC_ENCODE;
	config->addr = addr;
	ret = mtk_ecc_enable(ecc, config);
	if (ret) {
		dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
		return ret;
	}

	ret = mtk_ecc_wait_done(ecc, ECC_ENCODE);
	if (ret)
		goto timeout;

	mtk_ecc_wait_idle(ecc, ECC_ENCODE);

	/* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
	len = (config->strength * ECC_PARITY_BITS + 7) >> 3;

	/* write the parity bytes generated by the ECC back to temp buffer */
	__ioread32_copy(ecc->eccdata, ecc->regs + ECC_ENCPAR(0),
			round_up(len, 4));

	/* copy into possibly unaligned OOB region with actual length */
	memcpy(data + bytes, ecc->eccdata, len);
timeout:

	dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
	mtk_ecc_disable(ecc);

	return ret;
}
EXPORT_SYMBOL(mtk_ecc_encode);
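
/*
 * Clamp a requested ECC strength to the closest value the engine supports:
 * round down to the next lower supported strength, except that requests below
 * 4 bits are raised to 4 and requests above 60 bits are capped at 60.
 */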
void mtk_ecc_adjust_strength(u32 *p)
{
	u32 ecc[] = {4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
			40, 44, 48, 52, 56, 60};
	int i;

	for (i = 0; i < ARRAY_SIZE(ecc); i++) {
		if (*p <= ecc[i]) {
			if (!i)
				*p = ecc[i];
			else if (*p != ecc[i])
				*p = ecc[i - 1];
			return;
		}
	}

	*p = ecc[ARRAY_SIZE(ecc) - 1];
}
EXPORT_SYMBOL(mtk_ecc_adjust_strength);
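
/*
 * Probe: map the register window, grab the (unnamed) clock and the interrupt,
 * restrict DMA to 32-bit addresses and publish the instance via drvdata so
 * that mtk_ecc_get() can find it.
 */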
static int mtk_ecc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_ecc *ecc;
	struct resource *res;
	int irq, ret;

	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ecc->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(ecc->regs)) {
		dev_err(dev, "failed to map regs: %ld\n", PTR_ERR(ecc->regs));
		return PTR_ERR(ecc->regs);
	}

	ecc->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(ecc->clk)) {
		dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
		return PTR_ERR(ecc->clk);
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "failed to get irq\n");
		return -EINVAL;
	}

	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "failed to set DMA mask\n");
		return ret;
	}

	ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return -EINVAL;
	}

	ecc->dev = dev;
	mutex_init(&ecc->lock);
	platform_set_drvdata(pdev, ecc);
	dev_info(dev, "probed\n");

	return 0;
}
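
/*
 * System sleep support: suspend only gates the clock; resume re-enables it
 * and re-runs mtk_ecc_hw_init() to put both engines back into the disabled
 * idle state.
 */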
#ifdef CONFIG_PM_SLEEP
static int mtk_ecc_suspend(struct device *dev)
{
	struct mtk_ecc *ecc = dev_get_drvdata(dev);

	clk_disable_unprepare(ecc->clk);

	return 0;
}

static int mtk_ecc_resume(struct device *dev)
{
	struct mtk_ecc *ecc = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(ecc->clk);
	if (ret) {
		dev_err(dev, "failed to enable clk\n");
		return ret;
	}

	mtk_ecc_hw_init(ecc);

	return 0;
}

static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume);
#endif

static const struct of_device_id mtk_ecc_dt_match[] = {
	{ .compatible = "mediatek,mt2701-ecc" },
	{},
};

MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match);

static struct platform_driver mtk_ecc_driver = {
	.probe  = mtk_ecc_probe,
	.driver = {
		.name  = "mtk-ecc",
		.of_match_table = of_match_ptr(mtk_ecc_dt_match),
#ifdef CONFIG_PM_SLEEP
		.pm = &mtk_ecc_pm_ops,
#endif
	},
};

module_platform_driver(mtk_ecc_driver);

MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
MODULE_DESCRIPTION("MTK Nand ECC Driver");
MODULE_LICENSE("GPL");