/*
 * NAND Flash Controller Device Driver
 * Copyright © 2009-2010, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "denali.h"

MODULE_LICENSE("GPL");
#define DENALI_NAND_NAME	"denali-nand"

/* for Indexed Addressing */
#define DENALI_INDEXED_CTRL	0x00
#define DENALI_INDEXED_DATA	0x10

#define DENALI_MAP00		(0 << 26)	/* direct access to buffer */
#define DENALI_MAP01		(1 << 26)	/* read/write pages in PIO */
#define DENALI_MAP10		(2 << 26)	/* high-level control plane */
#define DENALI_MAP11		(3 << 26)	/* direct controller access */

/* MAP11 access cycle type */
#define DENALI_MAP11_CMD	((DENALI_MAP11) | 0)	/* command cycle */
#define DENALI_MAP11_ADDR	((DENALI_MAP11) | 1)	/* address cycle */
#define DENALI_MAP11_DATA	((DENALI_MAP11) | 2)	/* data cycle */

#define DENALI_ERASE		0x01

#define DENALI_BANK(denali)	((denali)->active_bank << 24)

#define DENALI_INVALID_BANK	-1
#define DENALI_NR_BANKS		4
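
/*
 * Illustrative example (not part of the original source): a MAP11 command
 * cycle to bank 2 is issued at host address
 *	DENALI_MAP11_CMD | DENALI_BANK = (3 << 26) | (2 << 24) = 0x0e000000
 * i.e. bits 27:26 select the MAP, bits 25:24 select the bank, and the low
 * bits carry the cycle type (or the page address for MAP01/MAP10 accesses).
 */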
static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}
/*
 * Direct Addressing - the slave address forms the control information (command
 * type, bank, block, and page address). The slave data is the actual data to
 * be transferred. This mode requires 28 bits of address region allocated.
 */
static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
{
	return ioread32(denali->host + addr);
}

static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
				u32 data)
{
	iowrite32(data, denali->host + addr);
}
/*
 * Indexed Addressing - address translation module intervenes in passing the
 * control information. This mode reduces the required address range. The
 * control information and transferred data are latched by the registers in
 * the translation module.
 */
static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	return ioread32(denali->host + DENALI_INDEXED_DATA);
}

static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
				 u32 data)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}
/*
 * Use the configuration feature register to determine the maximum number of
 * banks that the hardware supports.
 */
static void denali_detect_max_banks(struct denali_nand_info *denali)
{
	uint32_t features = ioread32(denali->reg + FEATURES);

	denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);

	/* the encoding changed from rev 5.0 to 5.1 */
	if (denali->revision < 0x0501)
		denali->max_banks <<= 1;
}
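
/*
 * Worked example (illustrative): if FEATURES__N_BANKS reads back as 2,
 * max_banks = 1 << 2 = 4 on rev 5.1+ hardware; on pre-5.1 revisions the
 * field is encoded one step lower, so the value is doubled to 8.
 */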
static void denali_enable_irq(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		iowrite32(U32_MAX, denali->reg + INTR_EN(i));
	iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
}

static void denali_disable_irq(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		iowrite32(0, denali->reg + INTR_EN(i));
	iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
}
static void denali_clear_irq(struct denali_nand_info *denali,
			     int bank, uint32_t irq_status)
{
	/* write one to clear bits */
	iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}

static void denali_clear_irq_all(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		denali_clear_irq(denali, i, U32_MAX);
}
static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_nand_info *denali = dev_id;
	irqreturn_t ret = IRQ_NONE;
	uint32_t irq_status;
	int i;

	spin_lock(&denali->irq_lock);

	for (i = 0; i < DENALI_NR_BANKS; i++) {
		irq_status = ioread32(denali->reg + INTR_STATUS(i));
		if (irq_status)
			ret = IRQ_HANDLED;

		denali_clear_irq(denali, i, irq_status);

		if (i != denali->active_bank)
			continue;

		denali->irq_status |= irq_status;

		if (denali->irq_status & denali->irq_mask)
			complete(&denali->complete);
	}

	spin_unlock(&denali->irq_lock);

	return ret;
}
static void denali_reset_irq(struct denali_nand_info *denali)
{
	unsigned long flags;

	spin_lock_irqsave(&denali->irq_lock, flags);
	denali->irq_status = 0;
	denali->irq_mask = 0;
	spin_unlock_irqrestore(&denali->irq_lock, flags);
}
static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
				    uint32_t irq_mask)
{
	unsigned long time_left, flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);

	irq_status = denali->irq_status;

	if (irq_mask & irq_status) {
		/* return immediately if the IRQ has already happened. */
		spin_unlock_irqrestore(&denali->irq_lock, flags);
		return irq_status;
	}

	denali->irq_mask = irq_mask;
	reinit_completion(&denali->complete);
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	time_left = wait_for_completion_timeout(&denali->complete,
						msecs_to_jiffies(1000));
	if (!time_left) {
		dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
			irq_mask);
		return 0;
	}

	return denali->irq_status;
}
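
/*
 * Typical usage pattern in this driver (illustrative sketch, mirroring the
 * callers below): clear the latched status, kick off an operation, then
 * block on the expected completion bits, e.g.
 *
 *	denali_reset_irq(denali);
 *	denali->host_write(denali, ...);	// start the operation
 *	irq_status = denali_wait_for_irq(denali, INTR__SOME_EVENT);
 *	if (!(irq_status & INTR__SOME_EVENT))
 *		return -EIO;			// timed out or failed
 *
 * INTR__SOME_EVENT stands in for a real interrupt bit such as
 * INTR__ERASE_COMP; it is a placeholder, not an actual register flag.
 */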
static uint32_t denali_check_irq(struct denali_nand_info *denali)
{
	unsigned long flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);
	irq_status = denali->irq_status;
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	return irq_status;
}
static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	int i;

	for (i = 0; i < len; i++)
		buf[i] = denali->host_read(denali, addr);
}

static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	int i;

	for (i = 0; i < len; i++)
		denali->host_write(denali, addr, buf[i]);
}
static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	uint16_t *buf16 = (uint16_t *)buf;
	int i;

	for (i = 0; i < len / 2; i++)
		buf16[i] = denali->host_read(denali, addr);
}

static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
			       int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	const uint16_t *buf16 = (const uint16_t *)buf;
	int i;

	for (i = 0; i < len / 2; i++)
		denali->host_write(denali, addr, buf16[i]);
}
static uint8_t denali_read_byte(struct mtd_info *mtd)
{
	uint8_t byte;

	denali_read_buf(mtd, &byte, 1);

	return byte;
}

static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
{
	denali_write_buf(mtd, &byte, 1);
}

static uint16_t denali_read_word(struct mtd_info *mtd)
{
	uint16_t word;

	denali_read_buf16(mtd, (uint8_t *)&word, 2);

	return word;
}
static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t type;

	if (ctrl & NAND_CLE)
		type = DENALI_MAP11_CMD;
	else if (ctrl & NAND_ALE)
		type = DENALI_MAP11_ADDR;
	else
		return;

	/*
	 * Some commands are followed by chip->dev_ready or chip->waitfunc.
	 * irq_status must be cleared here to catch the R/B# interrupt later.
	 */
	if (ctrl & NAND_CTRL_CHANGE)
		denali_reset_irq(denali);

	denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}
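
/*
 * Illustrative example (based on how the raw NAND core drives ->cmd_ctrl,
 * not code from this driver): a RESET command arrives as a single call
 * with dat = NAND_CMD_RESET (0xff) and ctrl containing NAND_CLE, so it is
 * issued as one MAP11 command cycle; each address byte of a page read then
 * follows as its own call with NAND_ALE set, i.e. one MAP11 address cycle.
 */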
static int denali_dev_ready(struct mtd_info *mtd)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	return !!(denali_check_irq(denali) & INTR__INT_ACT);
}
static int denali_check_erased_page(struct mtd_info *mtd,
				    struct nand_chip *chip, uint8_t *buf,
				    unsigned long uncor_ecc_flags,
				    unsigned int max_bitflips)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int i, stat;

	for (i = 0; i < ecc_steps; i++) {
		if (!(uncor_ecc_flags & BIT(i)))
			continue;

		stat = nand_check_erased_ecc_chunk(buf, ecc_size,
						   ecc_code, ecc_bytes,
						   NULL, 0,
						   chip->ecc.strength);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}

		buf += ecc_size;
		ecc_code += ecc_bytes;
	}

	return max_bitflips;
}
static int denali_hw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bank = denali->active_bank;
	uint32_t ecc_cor;
	unsigned int max_bitflips;

	ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when an uncorrectable error occurs in at
		 * least one ECC sector.  We can not know "how many sectors",
		 * or "which sector(s)", so the erased-page check must run
		 * for all sectors.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);

	/*
	 * The register holds the maximum of per-sector corrected bitflips.
	 * This is suitable for the return value of the ->read_page() callback.
	 * Unfortunately, we can not know the total number of corrected bits in
	 * the page.  Increase the stats by max_bitflips.  (compromised solution)
	 */
	mtd->ecc_stats.corrected += max_bitflips;

	return max_bitflips;
}
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags, uint8_t *buf)
{
	unsigned int ecc_size = denali->nand.ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	uint32_t err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	uint8_t err_cor_value;
	unsigned int prev_sector = 0;
	uint32_t irq_status;

	denali_reset_irq(denali);

	do {
		err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
		err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
		err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);

		err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
		err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
					  err_cor_info);
		err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
				       err_cor_info);

		/* reset the bitflip counter when crossing ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * If err_byte is larger than ecc_size, the error
			 * happened in the OOB area, so we ignore it; there is
			 * no need to correct it.  err_device indicates which
			 * NAND device the error bits belong to when more than
			 * one NAND is connected.
			 */
			int offset;
			unsigned int flips_in_byte;

			offset = (err_sector * ecc_size + err_byte) *
					denali->devs_per_cs + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			mtd->ecc_stats.corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));

	/*
	 * Once all ECC errors are handled, the controller triggers an
	 * ECC_TRANSACTION_DONE interrupt.
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}
static void denali_setup_dma64(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali) | page;

	/* DMA is a three step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	denali->host_write(denali, mode,
			   0x01002000 | (64 << 16) | (write << 8) | page_count);

	/* 2. set memory low address */
	denali->host_write(denali, mode, lower_32_bits(dma_addr));

	/* 3. set memory high address */
	denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
static void denali_setup_dma32(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	denali->host_write(denali, mode | page,
			   0x2000 | (write << 8) | page_count);

	/* 2. set memory high address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	denali->host_write(denali, mode | 0x14000, 0x2400);
}
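
/*
 * Worked example (illustrative): for dma_addr = 0x12345678, step 2 carries
 * the high half 0x1234 and step 3 the low half 0x5678, each shifted into
 * bits 23:8 of the MAP10 address; the written data words 0x2200/0x2300
 * merely select which half of the address register is being loaded.
 */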
static int denali_pio_read(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status, ecc_err_mask;
	int i;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	else
		ecc_err_mask = INTR__ECC_ERR;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		*buf32++ = denali->host_read(denali, addr);

	irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
	if (!(irq_status & INTR__PAGE_XFER_INC))
		return -EIO;

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return irq_status & ecc_err_mask ? -EBADMSG : 0;
}
static int denali_pio_write(struct denali_nand_info *denali,
			    const void *buf, size_t size, int page, int raw)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	const uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status;
	int i;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		denali->host_write(denali, addr, *buf32++);

	irq_status = denali_wait_for_irq(denali,
				INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
	if (!(irq_status & INTR__PROGRAM_COMP))
		return -EIO;

	return 0;
}
static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	if (write)
		return denali_pio_write(denali, buf, size, page, raw);
	else
		return denali_pio_read(denali, buf, size, page, raw);
}
static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	dma_addr_t dma_addr;
	uint32_t irq_mask, irq_status, ecc_err_mask;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = 0;

	dma_addr = dma_map_single(denali->dev, buf, size, dir);
	if (dma_mapping_error(denali->dev, dma_addr)) {
		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
		return denali_pio_xfer(denali, buf, size, page, raw, write);
	}

	if (write) {
		/*
		 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
		 * We can use INTR__DMA_CMD_COMP instead.  This flag is asserted
		 * when the page program is completed.
		 */
		irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
		ecc_err_mask = 0;
	} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	} else {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_ERR;
	}

	iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);

	denali_reset_irq(denali);
	denali->setup_dma(denali, dma_addr, page, write);

	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP))
		ret = -EIO;
	else if (irq_status & ecc_err_mask)
		ret = -EBADMSG;

	iowrite32(0, denali->reg + DMA_ENABLE);

	dma_unmap_single(denali->dev, dma_addr, size, dir);

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return ret;
}
static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
			    size_t size, int page, int raw, int write)
{
	iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
	iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
		  denali->reg + TRANSFER_SPARE_REG);

	if (denali->dma_avail)
		return denali_dma_xfer(denali, buf, size, page, raw, write);
	else
		return denali_pio_xfer(denali, buf, size, page, raw, write);
}
static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
			    int page, int write)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	uint8_t *bufpoi = chip->oob_poi;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/* BBM at the beginning of the OOB area */
	if (write)
		nand_prog_page_begin_op(chip, page, writesize, bufpoi,
					oob_skip);
	else
		nand_read_page_op(chip, page, writesize, bufpoi, oob_skip);
	bufpoi += oob_skip;

	/* OOB ECC */
	for (i = 0; i < ecc_steps; i++) {
		pos = ecc_size + i * (ecc_size + ecc_bytes);
		len = ecc_bytes;

		if (pos >= writesize)
			pos += oob_skip;
		else if (pos + len > writesize)
			len = writesize - pos;

		if (write)
			nand_change_write_column_op(chip, pos, bufpoi, len,
						    false);
		else
			nand_change_read_column_op(chip, pos, bufpoi, len,
						   false);
		bufpoi += len;
		if (len < ecc_bytes) {
			len = ecc_bytes - len;
			if (write)
				nand_change_write_column_op(chip, writesize +
							    oob_skip, bufpoi,
							    len, false);
			else
				nand_change_read_column_op(chip, writesize +
							   oob_skip, bufpoi,
							   len, false);
			bufpoi += len;
		}
	}

	/* OOB free */
	len = oobsize - (bufpoi - chip->oob_poi);
	if (write)
		nand_change_write_column_op(chip, size - len, bufpoi, len,
					    false);
	else
		nand_change_read_column_op(chip, size - len, bufpoi, len,
					   false);
}
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int ret, i, pos, len;

	ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
	if (ret)
		return ret;

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(buf, tmp_buf + pos, len);
			buf += len;
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(buf, tmp_buf + writesize + oob_skip,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(oob, tmp_buf + writesize, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(oob, tmp_buf + pos, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(oob, tmp_buf + writesize + oob_skip,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(oob, tmp_buf + size - len, len);
	}

	return 0;
}
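
/*
 * Worked layout example (illustrative, assuming a 2048+64 byte page with
 * ecc.size = 512, ecc.bytes = 14, ecc.steps = 4, oob_skip_bytes = 8):
 * on flash, payload and ECC are interleaved as
 *	data0[0..511]  ecc0[512..525]  data1[526..1037]  ecc1[1038..1051]
 *	data2[1052..1563]  ecc2[1564..1577]  data3a[1578..2047]
 *	BBM[2048..2055]  data3b[2056..2097]  ecc3[2098..2111]
 * i.e. data3 is split around the 8 skipped BBM bytes at the writesize
 * boundary, which is exactly the splitting the loops above perform.
 */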
static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			   int page)
{
	denali_oob_xfer(mtd, chip, page, 0);

	return 0;
}
static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
			    int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	denali_reset_irq(denali);

	denali_oob_xfer(mtd, chip, page, 1);

	return nand_prog_page_end_op(chip);
}
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;
	int ret;

	ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
	if (ret && ret != -EBADMSG)
		return ret;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
	else if (ret == -EBADMSG)
		stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		ret = denali_read_oob(mtd, chip, page);
		if (ret)
			return ret;

		stat = denali_check_erased_page(mtd, chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}
static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/*
	 * Fill the buffer with 0xff first except the full page transfer.
	 * This simplifies the logic.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, size);

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, buf, len);
			buf += len;
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(tmp_buf + writesize + oob_skip, buf,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		const uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(tmp_buf + writesize, oob, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, oob, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(tmp_buf + writesize + oob_skip, oob,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(tmp_buf + size - len, oob, len);
	}

	return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}
static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			     const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	return denali_data_xfer(denali, (void *)buf, mtd->writesize,
				page, 0, 1);
}
static void denali_select_chip(struct mtd_info *mtd, int chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	denali->active_bank = chip;
}
static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status;

	/* R/B# pin transitioned from low to high? */
	irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);

	return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
}
static int denali_erase(struct mtd_info *mtd, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status;

	denali_reset_irq(denali);

	denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
			   DENALI_ERASE);

	/* wait for erase to complete or failure to occur */
	irq_status = denali_wait_for_irq(denali,
					 INTR__ERASE_COMP | INTR__ERASE_FAIL);

	return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
}
static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
				       const struct nand_data_interface *conf)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	const struct nand_sdr_timings *timings;
	unsigned long t_x, mult_x;
	int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
	int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
	int addr_2_data_mask;
	uint32_t tmp;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	/* clk_x period in picoseconds */
	t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
	if (!t_x)
		return -EINVAL;

	/*
	 * The bus interface clock, clk_x, is phase aligned with the core clock.
	 * The clk_x is an integral multiple N of the core clk.  The value N is
	 * configured at IP delivery time, and its available value is 4, 5, 6.
	 */
	mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
	if (mult_x < 4 || mult_x > 6)
		return -EINVAL;

	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	/* tREA -> ACC_CLKS */
	acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

	tmp = ioread32(denali->reg + ACC_CLKS);
	tmp &= ~ACC_CLKS__VALUE;
	tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
	iowrite32(tmp, denali->reg + ACC_CLKS);

	/* tRWH -> RE_2_WE */
	re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
	re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

	tmp = ioread32(denali->reg + RE_2_WE);
	tmp &= ~RE_2_WE__VALUE;
	tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
	iowrite32(tmp, denali->reg + RE_2_WE);

	/* tRHZ -> RE_2_RE */
	re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
	re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

	tmp = ioread32(denali->reg + RE_2_RE);
	tmp &= ~RE_2_RE__VALUE;
	tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
	iowrite32(tmp, denali->reg + RE_2_RE);

	/*
	 * tCCS, tWHR -> WE_2_RE
	 *
	 * With WE_2_RE properly set, the Denali controller automatically takes
	 * care of the delay; the driver need not set NAND_WAIT_TCCS.
	 */
	we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
	we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

	tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
	tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
	tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
	iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);

	/* tADL -> ADDR_2_DATA */

	/* for older versions, ADDR_2_DATA is only 6 bit wide */
	addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	if (denali->revision < 0x0501)
		addr_2_data_mask >>= 1;

	addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
	addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

	tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
	tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
	iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);

	/* tREH, tWH -> RDWR_EN_HI_CNT */
	rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
				  t_x);
	rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
	tmp &= ~RDWR_EN_HI_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
	iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);

	/* tRP, tWP -> RDWR_EN_LO_CNT */
	rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
	rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
				     t_x);
	rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x);
	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
	rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
	tmp &= ~RDWR_EN_LO_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
	iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);

	/* tCS, tCEA -> CS_SETUP_CNT */
	cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
			(int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
			0);
	cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

	tmp = ioread32(denali->reg + CS_SETUP_CNT);
	tmp &= ~CS_SETUP_CNT__VALUE;
	tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
	iowrite32(tmp, denali->reg + CS_SETUP_CNT);

	return 0;
}
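
/*
 * Worked example (illustrative): with clk_x_rate = 200 MHz, t_x = 5000 ps.
 * For ONFI SDR timing mode 0, tREA_max = 40000 ps, so
 *	acc_clks = DIV_ROUND_UP(40000, 5000) = 8
 * which fits under the ACC_CLKS__VALUE field limit.  The other timing
 * fields are derived the same way: divide the datasheet constraint by the
 * clk_x period, round up, then clamp to the register field width.
 */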
static void denali_reset_banks(struct denali_nand_info *denali)
{
	u32 irq_status;
	int i;

	for (i = 0; i < denali->max_banks; i++) {
		denali->active_bank = i;

		denali_reset_irq(denali);

		iowrite32(DEVICE_RESET__BANK(i),
			  denali->reg + DEVICE_RESET);

		irq_status = denali_wait_for_irq(denali,
			INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT);
		if (!(irq_status & INTR__INT_ACT))
			break;
	}

	dev_dbg(denali->dev, "%d chips connected\n", i);
	denali->max_banks = i;
}
static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * The REVISION register may not be reliable.  Platforms are allowed to
	 * override it.
	 */
	if (!denali->revision)
		denali->revision = swab16(ioread32(denali->reg + REVISION));

	/*
	 * Tell the driver how many bytes the controller skips before writing
	 * the ECC code into the OOB area.  This register may already have been
	 * set by firmware, so read the value out.  If it is 0, just leave it
	 * as it is.
	 */
	denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
	denali_detect_max_banks(denali);
	iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);

	iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
}
int denali_calc_ecc_bytes(int step_size, int strength)
{
	/* BCH code.  Denali requires ecc.bytes to be multiple of 2 */
	return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);
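
/*
 * Worked example (illustrative): for step_size = 512 and strength = 8,
 * fls(512 * 8) = fls(4096) = 13 bits per BCH symbol, so
 *	DIV_ROUND_UP(8 * 13, 16) * 2 = DIV_ROUND_UP(104, 16) * 2 = 7 * 2 = 14
 * i.e. 14 ECC bytes per 512-byte step.
 */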
static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
				struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = denali->oob_skip_bytes;
	oobregion->length = chip->ecc.total;

	return 0;
}
static int denali_ooblayout_free(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
	oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}
static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};
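
/*
 * Worked example (illustrative): with oob_skip_bytes = 8, ecc.total = 56
 * and oobsize = 64, the ECC region is oob[8..63] and the free region is
 * empty; with a larger 224-byte OOB and ecc.total = 112, the ECC region
 * is oob[8..119] and oob[120..223] remains free for the upper layers.
 */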
static int denali_multidev_fixup(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);

	/*
	 * Support for multi device:
	 * When the IP configuration is x16 capable and two x8 chips are
	 * connected in parallel, DEVICES_CONNECTED should be set to 2.
	 * In this case, the core framework knows nothing about this fact,
	 * so we should tell it the _logical_ pagesize and anything necessary.
	 */
	denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);

	/*
	 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
	 * For those, DEVICES_CONNECTED is left to 0.  Set 1 if it is the case.
	 */
	if (denali->devs_per_cs == 0) {
		denali->devs_per_cs = 1;
		iowrite32(1, denali->reg + DEVICES_CONNECTED);
	}

	if (denali->devs_per_cs == 1)
		return 0;

	if (denali->devs_per_cs != 2) {
		dev_err(denali->dev, "unsupported number of devices %d\n",
			denali->devs_per_cs);
		return -EINVAL;
	}

	/* 2 chips in parallel */
	mtd->size <<= 1;
	mtd->erasesize <<= 1;
	mtd->writesize <<= 1;
	mtd->oobsize <<= 1;
	chip->chipsize <<= 1;
	chip->page_shift += 1;
	chip->phys_erase_shift += 1;
	chip->bbt_erase_shift += 1;
	chip->chip_shift += 1;
	chip->pagemask <<= 1;
	chip->ecc.size <<= 1;
	chip->ecc.bytes <<= 1;
	chip->ecc.strength <<= 1;
	denali->oob_skip_bytes <<= 1;

	return 0;
}
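
/*
 * Worked example (illustrative): two x8 chips with 2 KiB pages and 64-byte
 * OOB behind one chip select appear to the core framework as a single
 * device with 4 KiB pages, 128-byte OOB, doubled ECC step/bytes/strength,
 * and 16 (instead of 8) BBM skip bytes.
 */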
static int denali_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int ret;

	if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
		denali->dma_avail = 1;

	if (denali->dma_avail) {
		int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;

		ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
		if (ret) {
			dev_info(denali->dev,
				 "Failed to set DMA mask. Disabling DMA.\n");
			denali->dma_avail = 0;
		}
	}

	if (denali->dma_avail) {
		chip->options |= NAND_USE_BOUNCE_BUFFER;
		chip->buf_align = 16;
		if (denali->caps & DENALI_CAP_DMA_64BIT)
			denali->setup_dma = denali_setup_dma64;
		else
			denali->setup_dma = denali_setup_dma32;
	}

	chip->bbt_options |= NAND_BBT_USE_FLASH;
	chip->bbt_options |= NAND_BBT_NO_OOB;
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
	chip->options |= NAND_NO_SUBPAGE_WRITE;

	ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
				   mtd->oobsize - denali->oob_skip_bytes);
	if (ret) {
		dev_err(denali->dev, "Failed to setup ECC settings.\n");
		return ret;
	}

	dev_dbg(denali->dev,
		"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
		chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);

	iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
		  FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
		  denali->reg + ECC_CORRECTION);
	iowrite32(mtd->erasesize / mtd->writesize,
		  denali->reg + PAGES_PER_BLOCK);
	iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
		  denali->reg + DEVICE_WIDTH);
	iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
		  denali->reg + TWO_ROW_ADDR_CYCLES);
	iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
	iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);

	iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
	iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
	/* chip->ecc.steps is set by nand_scan_tail(); not available here */
	iowrite32(mtd->writesize / chip->ecc.size,
		  denali->reg + CFG_NUM_DATA_BLOCKS);

	mtd_set_ooblayout(mtd, &denali_ooblayout_ops);

	if (chip->options & NAND_BUSWIDTH_16) {
		chip->read_buf = denali_read_buf16;
		chip->write_buf = denali_write_buf16;
	} else {
		chip->read_buf = denali_read_buf;
		chip->write_buf = denali_write_buf;
	}
	chip->ecc.read_page = denali_read_page;
	chip->ecc.read_page_raw = denali_read_page_raw;
	chip->ecc.write_page = denali_write_page;
	chip->ecc.write_page_raw = denali_write_page_raw;
	chip->ecc.read_oob = denali_read_oob;
	chip->ecc.write_oob = denali_write_oob;
	chip->erase = denali_erase;

	ret = denali_multidev_fixup(denali);
	if (ret)
		return ret;

	/*
	 * This buffer is DMA-mapped by denali_{read,write}_page_raw.  Do not
	 * use devm_kmalloc() because the memory allocated by devm_ does not
	 * guarantee DMA-safe alignment.
	 */
	denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!denali->buf)
		return -ENOMEM;

	return 0;
}
static void denali_detach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	kfree(denali->buf);
}
static const struct nand_controller_ops denali_controller_ops = {
	.attach_chip = denali_attach_chip,
	.detach_chip = denali_detach_chip,
};
int denali_init(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	u32 features = ioread32(denali->reg + FEATURES);
	int ret;

	mtd->dev.parent = denali->dev;
	denali_hw_init(denali);

	init_completion(&denali->complete);
	spin_lock_init(&denali->irq_lock);

	denali_clear_irq_all(denali);

	ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
			       IRQF_SHARED, DENALI_NAND_NAME, denali);
	if (ret) {
		dev_err(denali->dev, "Unable to request IRQ\n");
		return ret;
	}

	denali_enable_irq(denali);
	denali_reset_banks(denali);
	if (!denali->max_banks) {
		/* Error out early if no chip is found for some reason. */
		ret = -ENODEV;
		goto disable_irq;
	}

	denali->active_bank = DENALI_INVALID_BANK;

	nand_set_flash_node(chip, denali->dev->of_node);
	/* Fall back to the default name if DT did not give "label" property */
	if (!mtd->name)
		mtd->name = "denali-nand";

	chip->select_chip = denali_select_chip;
	chip->read_byte = denali_read_byte;
	chip->write_byte = denali_write_byte;
	chip->read_word = denali_read_word;
	chip->cmd_ctrl = denali_cmd_ctrl;
	chip->dev_ready = denali_dev_ready;
	chip->waitfunc = denali_waitfunc;

	if (features & FEATURES__INDEX_ADDR) {
		denali->host_read = denali_indexed_read;
		denali->host_write = denali_indexed_write;
	} else {
		denali->host_read = denali_direct_read;
		denali->host_write = denali_direct_write;
	}

	/* clk rate info is needed for setup_data_interface */
	if (denali->clk_rate && denali->clk_x_rate)
		chip->setup_data_interface = denali_setup_data_interface;

	chip->dummy_controller.ops = &denali_controller_ops;
	ret = nand_scan(mtd, denali->max_banks);
	if (ret)
		goto disable_irq;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
		goto cleanup_nand;
	}

	return 0;

cleanup_nand:
	nand_cleanup(chip);
disable_irq:
	denali_disable_irq(denali);

	return ret;
}
EXPORT_SYMBOL(denali_init);
void denali_remove(struct denali_nand_info *denali)
{
	struct mtd_info *mtd = nand_to_mtd(&denali->nand);

	nand_release(mtd);
	denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);