/*
 * Flexible Static Memory Controller (FSMC)
 * Driver for NAND portions
 *
 * Copyright © 2010 ST Microelectronics
 * Vipin Kumar <vipin.kumar@st.com>
 *
 * Based on drivers/mtd/nand/nomadik_nand.c (removed in v3.8)
 * Copyright © 2007 STMicroelectronics Pvt. Ltd.
 * Copyright © 2009 Alessandro Rubini
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/resource.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/amba/bus.h>
#include <mtd/mtd-abi.h>
/* fsmc controller registers for NOR flash */

/* ctrl register definitions */
#define BANK_ENABLE		(1 << 0)
#define MUXED			(1 << 1)
#define NOR_DEV			(2 << 2)
#define WIDTH_8			(0 << 4)
#define WIDTH_16		(1 << 4)
#define RSTPWRDWN		(1 << 6)
#define WPROT			(1 << 7)
#define WRT_ENABLE		(1 << 12)
#define WAIT_ENB		(1 << 13)

/* ctrl_tim register definitions */

#define FSMC_NOR_BANK_SZ	0x8
#define FSMC_NOR_REG_SIZE	0x40

#define FSMC_NOR_REG(base, bank, reg)	(base + \
					 FSMC_NOR_BANK_SZ * (bank) + \
					 (reg))
/* fsmc controller registers for NAND flash */

/* pc register definitions */
#define FSMC_RESET		(1 << 0)
#define FSMC_WAITON		(1 << 1)
#define FSMC_ENABLE		(1 << 2)
#define FSMC_DEVTYPE_NAND	(1 << 3)
#define FSMC_DEVWID_8		(0 << 4)
#define FSMC_DEVWID_16		(1 << 4)
#define FSMC_ECCEN		(1 << 6)
#define FSMC_ECCPLEN_512	(0 << 7)
#define FSMC_ECCPLEN_256	(1 << 7)
#define FSMC_TCLR_1		(1)
#define FSMC_TCLR_SHIFT		(9)
#define FSMC_TCLR_MASK		(0xF)
#define FSMC_TAR_1		(1)
#define FSMC_TAR_SHIFT		(13)
#define FSMC_TAR_MASK		(0xF)

/* sts register definitions */
#define FSMC_CODE_RDY		(1 << 15)

/* comm register definitions */
#define FSMC_TSET_SHIFT		0
#define FSMC_TSET_MASK		0xFF
#define FSMC_TWAIT_6		6
#define FSMC_TWAIT_SHIFT	8
#define FSMC_TWAIT_MASK		0xFF
#define FSMC_THOLD_4		4
#define FSMC_THOLD_SHIFT	16
#define FSMC_THOLD_MASK		0xFF
#define FSMC_THIZ_SHIFT		24
#define FSMC_THIZ_MASK		0xFF

#define FSMC_NAND_BANK_SZ	0x20

#define FSMC_BUSY_WAIT_TIMEOUT	(1 * HZ)
/* per-bank NAND timing parameters (see fsmc_calc_timings()/fsmc_nand_setup()) */
struct fsmc_nand_timings {
	uint8_t tclr;
	uint8_t tar;
	uint8_t thiz;
	uint8_t thold;
	uint8_t twait;
	uint8_t tset;
};
/**
 * struct fsmc_nand_data - structure for FSMC NAND device state
 *
 * @pid:		Part ID in the AMBA PrimeCell format
 * @mtd:		MTD info for a NAND flash.
 * @nand:		Chip related info for a NAND flash.
 * @partitions:		Partition info for a NAND Flash.
 * @nr_partitions:	Total number of partitions of a NAND flash.
 *
 * @bank:		Bank number for probed device.
 * @dev:		Parent device.
 * @mode:		Access mode (DMA or word/PIO).
 * @clk:		Clock structure for FSMC.
 *
 * @read_dma_chan:	DMA channel for read access
 * @write_dma_chan:	DMA channel for write access to NAND
 * @dma_access_complete: Completion structure
 *
 * @dev_timings:	NAND timings taken from the device tree, if any.
 *
 * @data_pa:		NAND Physical port for Data.
 * @data_va:		NAND port for Data.
 * @cmd_va:		NAND port for Command.
 * @addr_va:		NAND port for Address.
 * @regs_va:		Registers base address for a given bank.
 */
struct fsmc_nand_data {
	u32			pid;
	struct nand_chip	nand;

	unsigned int		bank;
	struct device		*dev;
	enum access_mode	mode;
	struct clk		*clk;

	/* DMA related objects */
	struct dma_chan		*read_dma_chan;
	struct dma_chan		*write_dma_chan;
	struct completion	dma_access_complete;

	struct fsmc_nand_timings *dev_timings;

	dma_addr_t		data_pa;
	void __iomem		*data_va;
	void __iomem		*cmd_va;
	void __iomem		*addr_va;
	void __iomem		*regs_va;
};
static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * 16) + 2;
	oobregion->length = 3;

	return 0;
}
static int fsmc_ecc1_ooblayout_free(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * 16) + 8;

	if (section < chip->ecc.steps - 1)
		oobregion->length = 8;
	else
		oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops fsmc_ecc1_ooblayout_ops = {
	.ecc = fsmc_ecc1_ooblayout_ecc,
	.free = fsmc_ecc1_ooblayout_free,
};
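
/*
 * Illustrative sketch (added commentary, not from the original source): with
 * this 1-bit ECC scheme each 512-byte step owns a 16-byte slice of OOB. For a
 * single-step 512+16 device the two callbacks above describe that slice as:
 *
 *   OOB byte:   0   1 | 2   3   4 | 5   6   7 | 8 ....... 15
 *   usage:      --      ECC         --          free
 *
 * Bytes not claimed by either region (0-1 and 5-7 here) are left for the
 * core, e.g. for bad block markers.
 */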
/*
 * ECC placement definitions in oobfree type format.
 * There are 13 bytes of ecc for every 512 byte block and it has to be read
 * consecutively and immediately after the 512 byte data block for hardware to
 * generate the error bit offsets in 512 byte data.
 */
static int fsmc_ecc4_ooblayout_ecc(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->length = chip->ecc.bytes;

	if (!section && mtd->writesize <= 512)
		oobregion->offset = 0;
	else
		oobregion->offset = (section * 16) + 2;

	return 0;
}
static int fsmc_ecc4_ooblayout_free(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * 16) + 15;

	if (section < chip->ecc.steps - 1)
		oobregion->length = 3;
	else
		oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = {
	.ecc = fsmc_ecc4_ooblayout_ecc,
	.free = fsmc_ecc4_ooblayout_free,
};
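
/*
 * Illustrative sketch (added commentary, not from the original source): for a
 * 2048+64 page (four 512-byte steps, 13 ECC bytes each) the callbacks above
 * describe every 16-byte OOB slice as bytes 2..14 = ECC, with the free region
 * starting at byte 15 of the slice and spilling into bytes 0..1 of the next
 * one; the last step's free region is the single trailing byte 63. On
 * small-page (<= 512 byte) devices the first ECC region starts at offset 0
 * instead.
 */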
static inline struct fsmc_nand_data *mtd_to_fsmc(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct fsmc_nand_data, nand);
}
/*
 * fsmc_nand_setup - FSMC (Flexible Static Memory Controller) init routine
 *
 * This routine initializes timing parameters related to NAND memory access in
 * the FSMC registers.
 */
static void fsmc_nand_setup(struct fsmc_nand_data *host,
			    struct fsmc_nand_timings *tims)
{
	uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
	uint32_t tclr, tar, thiz, thold, twait, tset;

	tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
	tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
	thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT;
	thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT;
	twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
	tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;

	if (host->nand.options & NAND_BUSWIDTH_16)
		writel_relaxed(value | FSMC_DEVWID_16,
			       host->regs_va + FSMC_PC);
	else
		writel_relaxed(value | FSMC_DEVWID_8, host->regs_va + FSMC_PC);

	writel_relaxed(readl(host->regs_va + FSMC_PC) | tclr | tar,
		       host->regs_va + FSMC_PC);
	writel_relaxed(thiz | thold | twait | tset, host->regs_va + COMM);
	writel_relaxed(thiz | thold | twait | tset, host->regs_va + ATTRIB);
}
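
/*
 * For reference (derived from the shift/mask macros above, not from any extra
 * documentation): the value written to COMM and ATTRIB packs tset into bits
 * [7:0], twait into bits [15:8], thold into bits [23:16] and thiz into bits
 * [31:24], while tclr (bits [12:9]) and tar (bits [16:13]) are OR-ed into the
 * PC register.
 */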
static int fsmc_calc_timings(struct fsmc_nand_data *host,
			     const struct nand_sdr_timings *sdrt,
			     struct fsmc_nand_timings *tims)
{
	unsigned long hclk = clk_get_rate(host->clk);
	unsigned long hclkn = NSEC_PER_SEC / hclk;
	uint32_t thiz, thold, twait, tset;

	if (sdrt->tRC_min < 30000)
		return -EOPNOTSUPP;

	tims->tar = DIV_ROUND_UP(sdrt->tAR_min / 1000, hclkn) - 1;
	if (tims->tar > FSMC_TAR_MASK)
		tims->tar = FSMC_TAR_MASK;
	tims->tclr = DIV_ROUND_UP(sdrt->tCLR_min / 1000, hclkn) - 1;
	if (tims->tclr > FSMC_TCLR_MASK)
		tims->tclr = FSMC_TCLR_MASK;

	thiz = sdrt->tCS_min - sdrt->tWP_min;
	tims->thiz = DIV_ROUND_UP(thiz / 1000, hclkn);

	thold = sdrt->tDH_min;
	if (thold < sdrt->tCH_min)
		thold = sdrt->tCH_min;
	if (thold < sdrt->tCLH_min)
		thold = sdrt->tCLH_min;
	if (thold < sdrt->tWH_min)
		thold = sdrt->tWH_min;
	if (thold < sdrt->tALH_min)
		thold = sdrt->tALH_min;
	if (thold < sdrt->tREH_min)
		thold = sdrt->tREH_min;
	tims->thold = DIV_ROUND_UP(thold / 1000, hclkn);
	if (tims->thold == 0)
		tims->thold = 1;
	else if (tims->thold > FSMC_THOLD_MASK)
		tims->thold = FSMC_THOLD_MASK;

	twait = max(sdrt->tRP_min, sdrt->tWP_min);
	tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
	if (tims->twait == 0)
		tims->twait = 1;
	else if (tims->twait > FSMC_TWAIT_MASK)
		tims->twait = FSMC_TWAIT_MASK;

	tset = max(sdrt->tCS_min - sdrt->tWP_min,
		   sdrt->tCEA_max - sdrt->tREA_max);
	tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1;
	if (tims->tset == 0)
		tims->tset = 1;
	else if (tims->tset > FSMC_TSET_MASK)
		tims->tset = FSMC_TSET_MASK;

	return 0;
}
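
/*
 * Worked example (added commentary, illustrative numbers only): with
 * hclk = 166 MHz, hclkn = NSEC_PER_SEC / hclk = 6 ns. For an SDR mode with
 * tRP_min = tWP_min = 25000 ps, twait = 25000 and
 * tims->twait = DIV_ROUND_UP(25000 / 1000, 6) - 1 = 5 - 1 = 4, i.e. the wait
 * state is stretched to what appears to be a "cycles minus one" encoding of
 * 5 HCLK cycles.
 */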
static int fsmc_setup_data_interface(struct mtd_info *mtd, int csline,
				     const struct nand_data_interface *conf)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct fsmc_nand_data *host = nand_get_controller_data(nand);
	struct fsmc_nand_timings tims;
	const struct nand_sdr_timings *sdrt;
	int ret;

	sdrt = nand_get_sdr_timings(conf);
	if (IS_ERR(sdrt))
		return PTR_ERR(sdrt);

	ret = fsmc_calc_timings(host, sdrt, &tims);
	if (ret)
		return ret;

	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	fsmc_nand_setup(host, &tims);

	return 0;
}
/*
 * fsmc_enable_hwecc - Enables Hardware ECC through FSMC registers
 */
static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);

	writel_relaxed(readl(host->regs_va + FSMC_PC) & ~FSMC_ECCPLEN_256,
		       host->regs_va + FSMC_PC);
	writel_relaxed(readl(host->regs_va + FSMC_PC) & ~FSMC_ECCEN,
		       host->regs_va + FSMC_PC);
	writel_relaxed(readl(host->regs_va + FSMC_PC) | FSMC_ECCEN,
		       host->regs_va + FSMC_PC);
}
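
/*
 * Note (added commentary): the sequence above first selects the 512-byte ECC
 * page length, then clears and re-sets FSMC_ECCEN; toggling the enable bit is
 * presumably what resets the hardware ECC accumulator before the next
 * 512-byte transfer.
 */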
/*
 * fsmc_read_hwecc_ecc4 - Hardware ECC calculator for ecc4 option supported by
 * FSMC. ECC is 13 bytes for 512 bytes of data (supports error correction up to
 * a maximum of 8 bits).
 */
static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
				uint8_t *ecc)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
	uint32_t ecc_tmp;
	unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;

	/* poll until the ECC engine reports the code ready, or time out */
	do {
		if (readl_relaxed(host->regs_va + STS) & FSMC_CODE_RDY)
			break;

		cond_resched();
	} while (!time_after_eq(jiffies, deadline));

	if (time_after_eq(jiffies, deadline)) {
		dev_err(host->dev, "calculate ecc timed out\n");
		return -ETIMEDOUT;
	}

	ecc_tmp = readl_relaxed(host->regs_va + ECC1);
	ecc[0] = (uint8_t) (ecc_tmp >> 0);
	ecc[1] = (uint8_t) (ecc_tmp >> 8);
	ecc[2] = (uint8_t) (ecc_tmp >> 16);
	ecc[3] = (uint8_t) (ecc_tmp >> 24);

	ecc_tmp = readl_relaxed(host->regs_va + ECC2);
	ecc[4] = (uint8_t) (ecc_tmp >> 0);
	ecc[5] = (uint8_t) (ecc_tmp >> 8);
	ecc[6] = (uint8_t) (ecc_tmp >> 16);
	ecc[7] = (uint8_t) (ecc_tmp >> 24);

	ecc_tmp = readl_relaxed(host->regs_va + ECC3);
	ecc[8] = (uint8_t) (ecc_tmp >> 0);
	ecc[9] = (uint8_t) (ecc_tmp >> 8);
	ecc[10] = (uint8_t) (ecc_tmp >> 16);
	ecc[11] = (uint8_t) (ecc_tmp >> 24);

	ecc_tmp = readl_relaxed(host->regs_va + STS);
	ecc[12] = (uint8_t) (ecc_tmp >> 16);

	return 0;
}
/*
 * fsmc_read_hwecc_ecc1 - Hardware ECC calculator for ecc1 option supported by
 * FSMC. ECC is 3 bytes for 512 bytes of data (supports error correction up to
 * a maximum of 1 bit).
 */
static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
				uint8_t *ecc)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
	uint32_t ecc_tmp;

	ecc_tmp = readl_relaxed(host->regs_va + ECC1);
	ecc[0] = (uint8_t) (ecc_tmp >> 0);
	ecc[1] = (uint8_t) (ecc_tmp >> 8);
	ecc[2] = (uint8_t) (ecc_tmp >> 16);

	return 0;
}
/* Count the number of bits written as 0 in buff, up to a maximum of max_bits */
static int count_written_bits(uint8_t *buff, int size, int max_bits)
{
	int k, written_bits = 0;

	for (k = 0; k < size; k++) {
		written_bits += hweight8(~buff[k]);
		if (written_bits > max_bits)
			break;
	}

	return written_bits;
}
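
/*
 * Illustrative example (added commentary): an erased byte reads back as 0xFF,
 * so hweight8(~0xFF) contributes 0, while a byte with one flipped bit, e.g.
 * 0xFB, contributes hweight8(~0xFB) = 1. fsmc_bch8_correct_data() below uses
 * this count to tell an (almost) erased page apart from a genuinely
 * uncorrectable one.
 */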
static void dma_complete(void *param)
{
	struct fsmc_nand_data *host = param;

	complete(&host->dma_access_complete);
}
static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
		    enum dma_data_direction direction)
{
	struct dma_chan *chan;
	struct dma_device *dma_dev;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dst, dma_src, dma_addr;
	dma_cookie_t cookie;
	unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	int ret;
	unsigned long time_left;

	if (direction == DMA_TO_DEVICE)
		chan = host->write_dma_chan;
	else if (direction == DMA_FROM_DEVICE)
		chan = host->read_dma_chan;
	else
		return -EINVAL;

	dma_dev = chan->device;
	dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);

	if (direction == DMA_TO_DEVICE) {
		dma_src = dma_addr;
		dma_dst = host->data_pa;
	} else {
		dma_src = host->data_pa;
		dma_dst = dma_addr;
	}

	tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
					     len, flags);
	if (!tx) {
		dev_err(host->dev, "device_prep_dma_memcpy error\n");
		ret = -EIO;
		goto unmap_dma;
	}

	tx->callback = dma_complete;
	tx->callback_param = host;
	cookie = tx->tx_submit(tx);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(host->dev, "dma_submit_error %d\n", cookie);
		goto unmap_dma;
	}

	dma_async_issue_pending(chan);

	time_left = wait_for_completion_timeout(&host->dma_access_complete,
						msecs_to_jiffies(3000));
	if (time_left == 0) {
		dmaengine_terminate_all(chan);
		dev_err(host->dev, "wait_for_completion_timeout\n");
		ret = -ETIMEDOUT;
		goto unmap_dma;
	}

	ret = 0;

unmap_dma:
	dma_unmap_single(dma_dev->dev, dma_addr, len, direction);

	return ret;
}
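
/*
 * Design note (added commentary): the FSMC block has no DMA engine of its own
 * and its NAND data port is a fixed address window (host->data_pa), so the
 * driver borrows a generic DMA_MEMCPY channel and points one end of the
 * memcpy at that window; this is why device_prep_dma_memcpy() is used rather
 * than a slave transfer.
 */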
/*
 * fsmc_write_buf - write buffer to chip
 * @mtd:	MTD device structure
 * @buf:	data buffer
 * @len:	number of bytes to write
 */
static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
	int i;

	if (IS_ALIGNED((uintptr_t)buf, sizeof(uint32_t)) &&
	    IS_ALIGNED(len, sizeof(uint32_t))) {
		uint32_t *p = (uint32_t *)buf;

		len = len >> 2;
		for (i = 0; i < len; i++)
			writel_relaxed(p[i], host->data_va);
	} else {
		for (i = 0; i < len; i++)
			writeb_relaxed(buf[i], host->data_va);
	}
}
/*
 * fsmc_read_buf - read chip data into buffer
 * @mtd:	MTD device structure
 * @buf:	buffer to store data
 * @len:	number of bytes to read
 */
static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
	int i;

	if (IS_ALIGNED((uintptr_t)buf, sizeof(uint32_t)) &&
	    IS_ALIGNED(len, sizeof(uint32_t))) {
		uint32_t *p = (uint32_t *)buf;

		len = len >> 2;
		for (i = 0; i < len; i++)
			p[i] = readl_relaxed(host->data_va);
	} else {
		for (i = 0; i < len; i++)
			buf[i] = readb_relaxed(host->data_va);
	}
}
/*
 * fsmc_read_buf_dma - read chip data into buffer
 * @mtd:	MTD device structure
 * @buf:	buffer to store data
 * @len:	number of bytes to read
 */
static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);

	dma_xfer(host, buf, len, DMA_FROM_DEVICE);
}
/*
 * fsmc_write_buf_dma - write buffer to chip
 * @mtd:	MTD device structure
 * @buf:	data buffer
 * @len:	number of bytes to write
 */
static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
			       int len)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);

	dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
}
/* fsmc_select_chip - assert or deassert nCE */
static void fsmc_select_chip(struct mtd_info *mtd, int chipnr)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
	u32 pc;

	/* Support only one CS */
	if (chipnr > 0)
		return;

	pc = readl(host->regs_va + FSMC_PC);
	if (chipnr < 0)
		writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + FSMC_PC);
	else
		writel_relaxed(pc | FSMC_ENABLE, host->regs_va + FSMC_PC);

	/* nCE line must be asserted before starting any operation */
	mb();
}
/*
 * fsmc_exec_op - hook called by the core to execute NAND operations
 *
 * This controller is simple enough and thus does not need to use the parser
 * provided by the core; instead, every situation is handled here.
 */
static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
			bool check_only)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
	const struct nand_op_instr *instr = NULL;
	int ret = 0;
	unsigned int op_id;
	int i;

	pr_debug("Executing operation [%d instructions]:\n", op->ninstrs);
	for (op_id = 0; op_id < op->ninstrs; op_id++) {
		instr = &op->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			pr_debug("  ->CMD      [0x%02x]\n",
				 instr->ctx.cmd.opcode);

			writeb_relaxed(instr->ctx.cmd.opcode, host->cmd_va);
			break;

		case NAND_OP_ADDR_INSTR:
			pr_debug("  ->ADDR     [%d cyc]",
				 instr->ctx.addr.naddrs);

			for (i = 0; i < instr->ctx.addr.naddrs; i++)
				writeb_relaxed(instr->ctx.addr.addrs[i],
					       host->addr_va);
			break;

		case NAND_OP_DATA_IN_INSTR:
			pr_debug("  ->DATA_IN  [%d B%s]\n", instr->ctx.data.len,
				 instr->ctx.data.force_8bit ?
				 ", force 8-bit" : "");

			if (host->mode == USE_DMA_ACCESS)
				fsmc_read_buf_dma(mtd, instr->ctx.data.buf.in,
						  instr->ctx.data.len);
			else
				fsmc_read_buf(mtd, instr->ctx.data.buf.in,
					      instr->ctx.data.len);
			break;

		case NAND_OP_DATA_OUT_INSTR:
			pr_debug("  ->DATA_OUT [%d B%s]\n", instr->ctx.data.len,
				 instr->ctx.data.force_8bit ?
				 ", force 8-bit" : "");

			if (host->mode == USE_DMA_ACCESS)
				fsmc_write_buf_dma(mtd,
						   instr->ctx.data.buf.out,
						   instr->ctx.data.len);
			else
				fsmc_write_buf(mtd, instr->ctx.data.buf.out,
					       instr->ctx.data.len);
			break;

		case NAND_OP_WAITRDY_INSTR:
			pr_debug("  ->WAITRDY  [max %d ms]\n",
				 instr->ctx.waitrdy.timeout_ms);

			ret = nand_soft_waitrdy(chip,
						instr->ctx.waitrdy.timeout_ms);
			break;
		}
	}

	return ret;
}
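
/*
 * Illustrative mapping (added commentary): for a large-page read the core
 * typically hands fsmc_exec_op() an operation of the form
 *   CMD(0x00), ADDR(5 cycles), CMD(0x30), WAITRDY, DATA_IN(len)
 * which the loop above turns into byte writes to cmd_va and addr_va, a
 * software ready/busy wait and a PIO or DMA data transfer.
 */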
/*
 * fsmc_read_page_hwecc
 * @mtd:	mtd info structure
 * @chip:	nand chip info structure
 * @buf:	buffer to store read data
 * @oob_required:	caller expects OOB data read to chip->oob_poi
 * @page:	page number to read
 *
 * This routine is needed for fsmc version 8 as reading from the NAND chip has
 * to be performed in a strict sequence as follows:
 * data(512 byte) -> ecc(13 byte)
 * After this read, fsmc hardware generates and reports error data bits (up to
 * a maximum of 8 bits).
 */
static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	int i, j, s, stat, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	int off, len, group = 0;
	/*
	 * ecc_oob is intentionally taken as uint16_t. In 16bit devices, we
	 * end up reading 14 bytes (7 words) from oob. The local array is
	 * to maintain word alignment
	 */
	uint16_t ecc_oob[7];
	uint8_t *oob = (uint8_t *)&ecc_oob[0];
	unsigned int max_bitflips = 0;

	for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
		nand_read_page_op(chip, page, s * eccsize, NULL, 0);
		chip->ecc.hwctl(mtd, NAND_ECC_READ);
		nand_read_data_op(chip, p, eccsize, false);

		for (j = 0; j < eccbytes;) {
			struct mtd_oob_region oobregion;
			int ret;

			ret = mtd_ooblayout_ecc(mtd, group++, &oobregion);
			if (ret)
				return ret;

			off = oobregion.offset;
			len = oobregion.length;

			/*
			 * length is intentionally kept a higher multiple of 2
			 * to read at least 13 bytes even in case of 16 bit NAND
			 * devices
			 */
			if (chip->options & NAND_BUSWIDTH_16)
				len = roundup(len, 2);

			nand_read_oob_op(chip, page, off, oob + j, len);
			j += len;
		}

		memcpy(&ecc_code[i], oob, chip->ecc.bytes);
		chip->ecc.calculate(mtd, p, &ecc_calc[i]);

		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	return max_bitflips;
}
/*
 * fsmc_bch8_correct_data
 * @mtd:	mtd info structure
 * @dat:	buffer of read data
 * @read_ecc:	ecc read from device spare area
 * @calc_ecc:	ecc calculated from read data
 *
 * calc_ecc is 104 bits of information containing up to 8 error offsets of
 * 13 bits each within the 512 bytes of read data.
 */
static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
				  uint8_t *read_ecc, uint8_t *calc_ecc)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
	uint32_t err_idx[8];
	uint32_t num_err, i;
	uint32_t ecc1, ecc2, ecc3, ecc4;

	num_err = (readl_relaxed(host->regs_va + STS) >> 10) & 0xF;

	/* no bit flipping */
	if (likely(num_err == 0))
		return 0;

	/* too many errors */
	if (unlikely(num_err > 8)) {
		/*
		 * This is a temporary erase check. A newly erased page read
		 * would result in an ecc error because the oob data is also
		 * erased to FF and the calculated ecc for an FF data is not
		 * FF..FF.
		 * This is a workaround to skip performing correction in case
		 * data is FF..FF
		 *
		 * Logic:
		 * For every page, each bit written as 0 is counted until the
		 * number of such bits exceeds 8 (the maximum correction
		 * capability of FSMC for each 512 + 13 bytes)
		 */
		int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8);
		int bits_data = count_written_bits(dat, chip->ecc.size, 8);

		if ((bits_ecc + bits_data) <= 8) {
			/* less than 8 bits are flipped: treat page as erased */
			memset(dat, 0xff, chip->ecc.size);
			return bits_data;
		}

		return -EBADMSG;
	}

	/*
	 * ------------------- calc_ecc[] bit wise -----------|--13 bits--|
	 * |---idx[7]--|--.....-----|---idx[2]--||---idx[1]--||---idx[0]--|
	 *
	 * calc_ecc is 104 bits of information containing up to 8 error
	 * offsets of 13 bits each. The offsets are unpacked from the
	 * ECC1/ECC2/ECC3/STS registers into the err_idx array below.
	 */
	ecc1 = readl_relaxed(host->regs_va + ECC1);
	ecc2 = readl_relaxed(host->regs_va + ECC2);
	ecc3 = readl_relaxed(host->regs_va + ECC3);
	ecc4 = readl_relaxed(host->regs_va + STS);

	err_idx[0] = (ecc1 >> 0) & 0x1FFF;
	err_idx[1] = (ecc1 >> 13) & 0x1FFF;
	err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
	err_idx[3] = (ecc2 >> 7) & 0x1FFF;
	err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
	err_idx[5] = (ecc3 >> 1) & 0x1FFF;
	err_idx[6] = (ecc3 >> 14) & 0x1FFF;
	err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);

	i = 0;
	while (num_err--) {
		change_bit(0, (unsigned long *)&err_idx[i]);
		change_bit(1, (unsigned long *)&err_idx[i]);

		if (err_idx[i] < chip->ecc.size * 8) {
			change_bit(err_idx[i], (unsigned long *)dat);
			i++;
		}
	}

	return i;
}
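
/*
 * Worked example (added commentary): err_idx[2] above is assembled from two
 * registers - its low 6 bits come from ecc1[31:26] and its upper 7 bits from
 * ecc2[6:0]. Once adjusted, an index is a bit offset into the 512-byte step:
 * e.g. err_idx[i] == 100 makes change_bit() flip bit 4 of byte 12 of dat
 * (100 = 12 * 8 + 4).
 */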
static bool filter(struct dma_chan *chan, void *slave)
{
	chan->private = slave;

	return true;
}
static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
				     struct fsmc_nand_data *host,
				     struct nand_chip *nand)
{
	struct device_node *np = pdev->dev.of_node;
	u32 val;
	int ret;

	if (!of_property_read_u32(np, "bank-width", &val)) {
		if (val == 2) {
			nand->options |= NAND_BUSWIDTH_16;
		} else if (val != 1) {
			dev_err(&pdev->dev, "invalid bank-width %u\n", val);
			return -EINVAL;
		}
	}

	if (of_get_property(np, "nand-skip-bbtscan", NULL))
		nand->options |= NAND_SKIP_BBTSCAN;

	host->dev_timings = devm_kzalloc(&pdev->dev,
					 sizeof(*host->dev_timings),
					 GFP_KERNEL);
	if (!host->dev_timings)
		return -ENOMEM;

	ret = of_property_read_u8_array(np, "timings", (u8 *)host->dev_timings,
					sizeof(*host->dev_timings));
	if (ret)
		host->dev_timings = NULL;

	/* Set default NAND bank to 0 */
	host->bank = 0;
	if (!of_property_read_u32(np, "bank", &val)) {
		/* the FSMC provides four chip-select banks */
		if (val > 3) {
			dev_err(&pdev->dev, "invalid bank %u\n", val);
			return -EINVAL;
		}
		host->bank = val;
	}

	return 0;
}
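
/*
 * Illustrative device-tree fragment (a sketch only: property names follow the
 * parsing above, the node name and all values are made-up examples):
 *
 *	nand@80000000 {
 *		bank-width = <1>;
 *		nand-skip-bbtscan;
 *		bank = <0>;
 *		timings = /bits/ 8 <0x01 0x01 0x01 0x04 0x06 0x00>;
 *	};
 */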
static int fsmc_nand_attach_chip(struct nand_chip *nand)
{
	struct mtd_info *mtd = nand_to_mtd(nand);
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);

	if (AMBA_REV_BITS(host->pid) >= 8) {
		switch (mtd->oobsize) {
		case 16:
		case 64:
		case 128:
		case 224:
		case 256:
			break;
		default:
			dev_warn(host->dev,
				 "No oob scheme defined for oobsize %d\n",
				 mtd->oobsize);
			return -EINVAL;
		}

		mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops);

		return 0;
	}

	switch (nand->ecc.mode) {
	case NAND_ECC_HW:
		dev_info(host->dev, "Using 1-bit HW ECC scheme\n");
		nand->ecc.calculate = fsmc_read_hwecc_ecc1;
		nand->ecc.correct = nand_correct_data;
		nand->ecc.bytes = 3;
		nand->ecc.strength = 1;
		break;

	case NAND_ECC_SOFT:
		if (nand->ecc.algo == NAND_ECC_BCH) {
			dev_info(host->dev,
				 "Using 4-bit SW BCH ECC scheme\n");
			break;
		}
		/* fall through */

	case NAND_ECC_ON_DIE:
		break;

	default:
		dev_err(host->dev, "Unsupported ECC mode!\n");
		return -ENOTSUPP;
	}

	/*
	 * Don't set the layout for BCH4 SW ECC. It will be
	 * generated later by nand_bch_init().
	 */
	if (nand->ecc.mode == NAND_ECC_HW) {
		switch (mtd->oobsize) {
		case 16:
		case 64:
		case 128:
			mtd_set_ooblayout(mtd,
					  &fsmc_ecc1_ooblayout_ops);
			break;
		default:
			dev_warn(host->dev,
				 "No oob scheme defined for oobsize %d\n",
				 mtd->oobsize);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nand_controller_ops fsmc_nand_controller_ops = {
	.attach_chip = fsmc_nand_attach_chip,
};
/*
 * fsmc_nand_probe - Probe function
 * @pdev:	platform device structure
 */
static int __init fsmc_nand_probe(struct platform_device *pdev)
{
	struct fsmc_nand_data *host;
	struct mtd_info *mtd;
	struct nand_chip *nand;
	struct resource *res;
	void __iomem *base;
	dma_cap_mask_t mask;
	int ret = 0;
	u32 pid;
	int i;

	/* Allocate memory for the device structure (and zero it) */
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	nand = &host->nand;

	ret = fsmc_nand_probe_config_dt(pdev, host, nand);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
	host->data_va = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->data_va))
		return PTR_ERR(host->data_va);

	host->data_pa = (dma_addr_t)res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
	host->addr_va = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->addr_va))
		return PTR_ERR(host->addr_va);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
	host->cmd_va = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->cmd_va))
		return PTR_ERR(host->cmd_va);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	host->regs_va = base + FSMC_NOR_REG_SIZE +
		(host->bank * FSMC_NAND_BANK_SZ);

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "failed to fetch block clock\n");
		return PTR_ERR(host->clk);
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		return ret;

	/*
	 * This device ID is actually a common AMBA ID as used on the
	 * AMBA PrimeCell bus. However it is not a PrimeCell.
	 */
	for (pid = 0, i = 0; i < 4; i++)
		pid |= (readl(base + resource_size(res) - 0x20 + 4 * i) &
			255) << (i * 8);
	host->pid = pid;

	dev_info(&pdev->dev,
		 "FSMC device partno %03x, manufacturer %02x, revision %02x, config %02x\n",
		 AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
		 AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));

	host->dev = &pdev->dev;

	if (host->mode == USE_DMA_ACCESS)
		init_completion(&host->dma_access_complete);

	/* Link all private pointers */
	mtd = nand_to_mtd(&host->nand);
	nand_set_controller_data(nand, host);
	nand_set_flash_node(nand, pdev->dev.of_node);

	mtd->dev.parent = &pdev->dev;
	nand->exec_op = fsmc_exec_op;
	nand->select_chip = fsmc_select_chip;
	nand->chip_delay = 30;

	/*
	 * Setup default ECC mode. nand_dt_init() called from nand_scan_ident()
	 * can overwrite this value if the DT provides a different value.
	 */
	nand->ecc.mode = NAND_ECC_HW;
	nand->ecc.hwctl = fsmc_enable_hwecc;
	nand->ecc.size = 512;
	nand->badblockbits = 7;

	if (host->mode == USE_DMA_ACCESS) {
		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		host->read_dma_chan = dma_request_channel(mask, filter, NULL);
		if (!host->read_dma_chan) {
			dev_err(&pdev->dev, "Unable to get read dma channel\n");
			ret = -ENODEV;
			goto disable_clk;
		}
		host->write_dma_chan = dma_request_channel(mask, filter, NULL);
		if (!host->write_dma_chan) {
			dev_err(&pdev->dev, "Unable to get write dma channel\n");
			ret = -ENODEV;
			goto release_dma_read_chan;
		}
	}

	if (host->dev_timings)
		fsmc_nand_setup(host, host->dev_timings);
	else
		nand->setup_data_interface = fsmc_setup_data_interface;

	if (AMBA_REV_BITS(host->pid) >= 8) {
		nand->ecc.read_page = fsmc_read_page_hwecc;
		nand->ecc.calculate = fsmc_read_hwecc_ecc4;
		nand->ecc.correct = fsmc_bch8_correct_data;
		nand->ecc.bytes = 13;
		nand->ecc.strength = 8;
	}

	/*
	 * Scan to find existence of the device
	 */
	nand->dummy_controller.ops = &fsmc_nand_controller_ops;
	ret = nand_scan(nand, 1);
	if (ret)
		goto release_dma_write_chan;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto cleanup_nand;

	platform_set_drvdata(pdev, host);
	dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");

	return 0;

cleanup_nand:
	nand_cleanup(nand);
release_dma_write_chan:
	if (host->mode == USE_DMA_ACCESS)
		dma_release_channel(host->write_dma_chan);
release_dma_read_chan:
	if (host->mode == USE_DMA_ACCESS)
		dma_release_channel(host->read_dma_chan);
disable_clk:
	clk_disable_unprepare(host->clk);

	return ret;
}
static int fsmc_nand_remove(struct platform_device *pdev)
{
	struct fsmc_nand_data *host = platform_get_drvdata(pdev);

	if (host) {
		nand_release(&host->nand);

		if (host->mode == USE_DMA_ACCESS) {
			dma_release_channel(host->write_dma_chan);
			dma_release_channel(host->read_dma_chan);
		}
		clk_disable_unprepare(host->clk);
	}

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int fsmc_nand_suspend(struct device *dev)
{
	struct fsmc_nand_data *host = dev_get_drvdata(dev);

	clk_disable_unprepare(host->clk);

	return 0;
}

static int fsmc_nand_resume(struct device *dev)
{
	struct fsmc_nand_data *host = dev_get_drvdata(dev);

	clk_prepare_enable(host->clk);
	if (host->dev_timings)
		fsmc_nand_setup(host, host->dev_timings);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
static const struct of_device_id fsmc_nand_id_table[] = {
	{ .compatible = "st,spear600-fsmc-nand" },
	{ .compatible = "stericsson,fsmc-nand" },
	{}
};
MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);

static struct platform_driver fsmc_nand_driver = {
	.remove = fsmc_nand_remove,
	.driver = {
		.name = "fsmc-nand",
		.of_match_table = fsmc_nand_id_table,
		.pm = &fsmc_nand_pm_ops,
	},
};

module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
MODULE_DESCRIPTION("NAND driver for SPEAr Platforms");