// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>
/* NANDc reg offsets */
#define	NAND_FLASH_CMD			0x00
#define	NAND_ADDR0			0x04
#define	NAND_ADDR1			0x08
#define	NAND_FLASH_CHIP_SELECT		0x0c
#define	NAND_EXEC_CMD			0x10
#define	NAND_FLASH_STATUS		0x14
#define	NAND_BUFFER_STATUS		0x18
#define	NAND_DEV0_CFG0			0x20
#define	NAND_DEV0_CFG1			0x24
#define	NAND_DEV0_ECC_CFG		0x28
#define	NAND_DEV1_ECC_CFG		0x2c
#define	NAND_DEV1_CFG0			0x30
#define	NAND_DEV1_CFG1			0x34
#define	NAND_READ_ID			0x40
#define	NAND_READ_STATUS		0x44
#define	NAND_DEV_CMD0			0xa0
#define	NAND_DEV_CMD1			0xa4
#define	NAND_DEV_CMD2			0xa8
#define	NAND_DEV_CMD_VLD		0xac
#define	SFLASHC_BURST_CFG		0xe0
#define	NAND_ERASED_CW_DETECT_CFG	0xe8
#define	NAND_ERASED_CW_DETECT_STATUS	0xec
#define	NAND_EBI2_ECC_BUF_CFG		0xf0
#define	FLASH_BUF_ACC			0x100

#define	NAND_CTRL			0xf00
#define	NAND_VERSION			0xf08
#define	NAND_READ_LOCATION_0		0xf20
#define	NAND_READ_LOCATION_1		0xf24
#define	NAND_READ_LOCATION_2		0xf28
#define	NAND_READ_LOCATION_3		0xf2c

/* dummy register offsets, used by write_reg_dma */
#define	NAND_DEV_CMD1_RESTORE		0xdead
#define	NAND_DEV_CMD_VLD_RESTORE	0xbeef
/* NAND_FLASH_CMD bits */
#define	PAGE_ACC			BIT(4)
#define	LAST_PAGE			BIT(5)

/* NAND_FLASH_CHIP_SELECT bits */
#define	NAND_DEV_SEL			0
#define	DM_EN				BIT(2)

/* NAND_FLASH_STATUS bits */
#define	FS_OP_ERR			BIT(4)
#define	FS_READY_BSY_N			BIT(5)
#define	FS_MPU_ERR			BIT(8)
#define	FS_DEVICE_STS_ERR		BIT(16)
#define	FS_DEVICE_WP			BIT(23)

/* NAND_BUFFER_STATUS bits */
#define	BS_UNCORRECTABLE_BIT		BIT(8)
#define	BS_CORRECTABLE_ERR_MSK		0x1f

/* NAND_DEVn_CFG0 bits */
#define	DISABLE_STATUS_AFTER_WRITE	4
#define	CW_PER_PAGE			6
#define	UD_SIZE_BYTES			9
#define	ECC_PARITY_SIZE_BYTES_RS	19
#define	SPARE_SIZE_BYTES		23
#define	NUM_ADDR_CYCLES			27
#define	STATUS_BFR_READ			30
#define	SET_RD_MODE_AFTER_STATUS	31

/* NAND_DEVn_CFG1 bits */
#define	DEV0_CFG1_ECC_DISABLE		0
#define	WIDE_FLASH			1
#define	NAND_RECOVERY_CYCLES		2
#define	CS_ACTIVE_BSY			5
#define	BAD_BLOCK_BYTE_NUM		6
#define	BAD_BLOCK_IN_SPARE_AREA		16
#define	WR_RD_BSY_GAP			17
#define	ENABLE_BCH_ECC			27

/* NAND_DEV0_ECC_CFG bits */
#define	ECC_CFG_ECC_DISABLE		0
#define	ECC_SW_RESET			1
#define	ECC_MODE			4
#define	ECC_PARITY_SIZE_BYTES_BCH	8
#define	ECC_NUM_DATA_BYTES		16
#define	ECC_FORCE_CLK_OPEN		30

/* NAND_DEV_CMD1 bits */
#define	READ_ADDR			0

/* NAND_DEV_CMD_VLD bits */
#define	READ_START_VLD			BIT(0)
#define	READ_STOP_VLD			BIT(1)
#define	WRITE_START_VLD			BIT(2)
#define	ERASE_START_VLD			BIT(3)
#define	SEQ_READ_START_VLD		BIT(4)

/* NAND_EBI2_ECC_BUF_CFG bits */
#define	NUM_STEPS			0

/* NAND_ERASED_CW_DETECT_CFG bits */
#define	ERASED_CW_ECC_MASK		1
#define	AUTO_DETECT_RES			0
#define	MASK_ECC			(1 << ERASED_CW_ECC_MASK)
#define	RESET_ERASED_DET		(1 << AUTO_DETECT_RES)
#define	ACTIVE_ERASED_DET		(0 << AUTO_DETECT_RES)
#define	CLR_ERASED_PAGE_DET		(RESET_ERASED_DET | MASK_ECC)
#define	SET_ERASED_PAGE_DET		(ACTIVE_ERASED_DET | MASK_ECC)

/* NAND_ERASED_CW_DETECT_STATUS bits */
#define	PAGE_ALL_ERASED			BIT(7)
#define	CODEWORD_ALL_ERASED		BIT(6)
#define	PAGE_ERASED			BIT(5)
#define	CODEWORD_ERASED			BIT(4)
#define	ERASED_PAGE			(PAGE_ALL_ERASED | PAGE_ERASED)
#define	ERASED_CW			(CODEWORD_ALL_ERASED | CODEWORD_ERASED)
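
/*
 * Note on how these masks are consumed (see parse_read_errors() below): a
 * codeword is treated as erased only when both CODEWORD_ALL_ERASED and
 * CODEWORD_ERASED are set, i.e. (status & ERASED_CW) == ERASED_CW; either
 * bit alone is not enough to declare the codeword erased.
 */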
/* NAND_READ_LOCATION_n bits */
#define READ_LOCATION_OFFSET		0
#define READ_LOCATION_SIZE		16
#define READ_LOCATION_LAST		31

/* Version Mask */
#define	NAND_VERSION_MAJOR_MASK		0xf0000000
#define	NAND_VERSION_MAJOR_SHIFT	28
#define	NAND_VERSION_MINOR_MASK		0x0fff0000
#define	NAND_VERSION_MINOR_SHIFT	16

/* NAND OP_CMDs */
#define	OP_PAGE_READ			0x2
#define	OP_PAGE_READ_WITH_ECC		0x3
#define	OP_PAGE_READ_WITH_ECC_SPARE	0x4
#define	OP_PROGRAM_PAGE			0x6
#define	OP_PAGE_PROGRAM_WITH_ECC	0x7
#define	OP_PROGRAM_PAGE_SPARE		0x9
#define	OP_BLOCK_ERASE			0xa
#define	OP_FETCH_ID			0xb
#define	OP_RESET_DEVICE			0xd

/* Default Value for NAND_DEV_CMD_VLD */
#define NAND_DEV_CMD_VLD_VAL		(READ_START_VLD | WRITE_START_VLD | \
					 ERASE_START_VLD | SEQ_READ_START_VLD)

/* NAND_CTRL bits */
#define	BAM_MODE_EN			BIT(0)

/*
 * the NAND controller performs reads/writes with ECC in 516 byte chunks.
 * the driver calls the chunks 'step' or 'codeword' interchangeably
 */
#define	NANDC_STEP_SIZE			512

/*
 * the largest page size we support is 8K, this will have 16 steps/codewords
 * of 512 bytes each
 */
#define	MAX_NUM_STEPS			(SZ_8K / NANDC_STEP_SIZE)

/* we read at most 3 registers per codeword scan */
#define	MAX_REG_RD			(3 * MAX_NUM_STEPS)

/* ECC modes supported by the controller */
#define	ECC_NONE	BIT(0)
#define	ECC_RS_4BIT	BIT(1)
#define	ECC_BCH_4BIT	BIT(2)
#define	ECC_BCH_8BIT	BIT(3)

#define nandc_set_read_loc(nandc, reg, offset, size, is_last)	\
nandc_set_reg(nandc, NAND_READ_LOCATION_##reg,			\
	      ((offset) << READ_LOCATION_OFFSET) |		\
	      ((size) << READ_LOCATION_SIZE) |			\
	      ((is_last) << READ_LOCATION_LAST))
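
/*
 * Worked example of the packing above: nandc_set_read_loc(nandc, 0, 0, 516, 1)
 * programs NAND_READ_LOCATION_0 with (0 << 0) | (516 << 16) | (1 << 31),
 * i.e. 0x82040000 -- read 516 bytes starting at offset 0 of the codeword
 * buffer, and mark this as the last read location.
 */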
/*
 * Returns the actual register address for all NAND_DEV_ registers
 * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
 */
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))

/* Returns the NAND register physical address */
#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))

/* Returns the dma address for reg read buffer */
#define reg_buf_dma_addr(chip, vaddr) \
	((chip)->reg_read_dma + \
	((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
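
/*
 * For example, if vaddr points at the third __le32 element of reg_read_buf,
 * reg_buf_dma_addr() yields reg_read_dma + 8: the same byte offset within
 * the buffer, translated into the DMA address space.
 */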
#define QPIC_PER_CW_CMD_ELEMENTS	32
#define QPIC_PER_CW_CMD_SGL		32
#define QPIC_PER_CW_DATA_SGL		8

#define QPIC_NAND_COMPLETION_TIMEOUT	msecs_to_jiffies(2000)

/*
 * Flags used in DMA descriptor preparation helper functions
 * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
 */
/* Don't set the EOT in current tx BAM sgl */
#define NAND_BAM_NO_EOT			BIT(0)
/* Set the NWD flag in current BAM sgl */
#define NAND_BAM_NWD			BIT(1)
/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
#define NAND_BAM_NEXT_SGL		BIT(2)
/*
 * Erased codeword status is being used two times in single transfer so this
 * flag will determine the current value of erased codeword status register
 */
#define NAND_ERASED_CW_SET		BIT(4)
/*
 * This data type corresponds to the BAM transaction which will be used for all
 * NAND transfers.
 * @bam_ce - the array of BAM command elements
 * @cmd_sgl - sgl for NAND BAM command pipe
 * @data_sgl - sgl for NAND BAM consumer/producer pipe
 * @bam_ce_pos - the index in bam_ce which is available for next sgl
 * @bam_ce_start - the index in bam_ce which marks the start position ce
 *		   for current sgl. It will be used for size calculation
 *		   for current sgl
 * @cmd_sgl_pos - current index in command sgl.
 * @cmd_sgl_start - start index in command sgl.
 * @tx_sgl_pos - current index in data sgl for tx.
 * @tx_sgl_start - start index in data sgl for tx.
 * @rx_sgl_pos - current index in data sgl for rx.
 * @rx_sgl_start - start index in data sgl for rx.
 * @wait_second_completion - wait for second DMA desc completion before making
 *			     the NAND transfer completion.
 * @txn_done - completion for NAND transfer.
 * @last_data_desc - last DMA desc in data channel (tx/rx).
 * @last_cmd_desc - last DMA desc in command channel.
 */
struct bam_transaction {
	struct bam_cmd_element *bam_ce;
	struct scatterlist *cmd_sgl;
	struct scatterlist *data_sgl;
	u32 bam_ce_pos;
	u32 bam_ce_start;
	u32 cmd_sgl_pos;
	u32 cmd_sgl_start;
	u32 tx_sgl_pos;
	u32 tx_sgl_start;
	u32 rx_sgl_pos;
	u32 rx_sgl_start;
	bool wait_second_completion;
	struct completion txn_done;
	struct dma_async_tx_descriptor *last_data_desc;
	struct dma_async_tx_descriptor *last_cmd_desc;
};
/*
 * This data type corresponds to the nand dma descriptor
 * @list - list for desc_info
 * @dir - DMA transfer direction
 * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
 *	      ADM
 * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
 * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
 * @dma_desc - low level DMA engine descriptor
 */
struct desc_info {
	struct list_head node;

	enum dma_data_direction dir;
	union {
		struct scatterlist adm_sgl;
		struct {
			struct scatterlist *bam_sgl;
			int sgl_cnt;
		};
	};
	struct dma_async_tx_descriptor *dma_desc;
};
/*
 * holds the current register values that we want to write. acts as a
 * contiguous chunk of memory which we use to write the controller registers
 * through DMA.
 */
struct nandc_regs {
	__le32 cmd;
	__le32 addr0;
	__le32 addr1;
	__le32 chip_sel;
	__le32 exec;

	__le32 cfg0;
	__le32 cfg1;
	__le32 ecc_bch_cfg;

	__le32 clrflashstatus;
	__le32 clrreadstatus;

	__le32 cmd1;
	__le32 vld;

	__le32 orig_cmd1;
	__le32 orig_vld;

	__le32 ecc_buf_cfg;
	__le32 read_location0;
	__le32 read_location1;
	__le32 read_location2;
	__le32 read_location3;

	__le32 erased_cw_detect_cfg_clr;
	__le32 erased_cw_detect_cfg_set;
};
/*
 * NAND controller data struct
 *
 * @controller:			base controller structure
 * @host_list:			list containing all the chips attached to the
 *				controller
 * @dev:			parent device
 *
 * @base:			MMIO base
 * @base_phys:			physical base address of controller registers
 * @base_dma:			dma base address of controller registers
 *
 * @core_clk:			controller clock
 * @aon_clk:			another controller clock
 *
 * @chan:			dma channel
 * @cmd_crci:			ADM DMA CRCI for command flow control
 * @data_crci:			ADM DMA CRCI for data flow control
 * @desc_list:			DMA descriptor list (list of desc_infos)
 *
 * @data_buffer:		our local DMA buffer for page read/writes,
 *				used when we can't use the buffer provided
 *				by upper layers directly
 * @buf_size/count/start:	markers for chip->legacy.read_buf/write_buf
 *				functions
 * @reg_read_buf:		local buffer for reading back registers via DMA
 * @reg_read_dma:		contains dma address for register read buffer
 * @reg_read_pos:		marker for data read in reg_read_buf
 *
 * @regs:			a contiguous chunk of memory for DMA register
 *				writes. contains the register values to be
 *				written to controller
 * @cmd1/vld:			some fixed controller register values
 * @props:			properties of current NAND controller,
 *				initialized via DT match data
 * @max_cwperpage:		maximum QPIC codewords required. calculated
 *				from all connected NAND devices pagesize
 */
struct qcom_nand_controller {
	struct nand_controller controller;
	struct list_head host_list;

	struct device *dev;

	void __iomem *base;
	phys_addr_t base_phys;
	dma_addr_t base_dma;

	struct clk *core_clk;
	struct clk *aon_clk;

	union {
		/* will be used only by QPIC for BAM DMA */
		struct {
			struct dma_chan *tx_chan;
			struct dma_chan *rx_chan;
			struct dma_chan *cmd_chan;
		};

		/* will be used only by EBI2 for ADM DMA */
		struct {
			struct dma_chan *chan;
			unsigned int cmd_crci;
			unsigned int data_crci;
		};
	};

	struct list_head desc_list;
	struct bam_transaction *bam_txn;

	u8		*data_buffer;
	int		buf_size;
	int		buf_count;
	int		buf_start;
	unsigned int	max_cwperpage;

	__le32 *reg_read_buf;
	dma_addr_t reg_read_dma;
	int reg_read_pos;

	struct nandc_regs *regs;

	u32 cmd1, vld;
	const struct qcom_nandc_props *props;
};
/*
 * NAND chip structure
 *
 * @chip:			base NAND chip structure
 * @node:			list node to add itself to host_list in
 *				qcom_nand_controller
 *
 * @cs:				chip select value for this chip
 * @cw_size:			the number of bytes in a single step/codeword
 *				of a page, consisting of all data, ecc, spare
 *				and reserved bytes
 * @cw_data:			the number of bytes within a codeword protected
 *				by ECC
 * @use_ecc:			request the controller to use ECC for the
 *				upcoming read/write
 * @bch_enabled:		flag to tell whether BCH ECC mode is used
 * @ecc_bytes_hw:		ECC bytes used by controller hardware for this
 *				chip
 * @status:			value to be returned if NAND_CMD_STATUS command
 *				is executed
 * @last_command:		keeps track of last command on this chip. used
 *				for reading correct status
 *
 * @cfg0, cfg1, cfg0_raw..:	NANDc register configurations needed for
 *				ecc/non-ecc mode for the current nand flash
 *				device
 */
struct qcom_nand_host {
	struct nand_chip chip;
	struct list_head node;

	int cs;
	int cw_size;
	int cw_data;
	bool use_ecc;
	bool bch_enabled;
	int ecc_bytes_hw;
	int spare_bytes;
	int bbm_size;
	u8 status;
	int last_command;

	u32 cfg0, cfg1;
	u32 cfg0_raw, cfg1_raw;
	u32 ecc_buf_cfg;
	u32 ecc_bch_cfg;
	u32 clrflashstatus;
	u32 clrreadstatus;
};
/*
 * This data type corresponds to the NAND controller properties which vary
 * among different NAND controllers.
 * @ecc_modes - ecc mode for NAND
 * @is_bam - whether NAND controller is using BAM
 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
 */
struct qcom_nandc_props {
	u32 ecc_modes;
	bool is_bam;
	u32 dev_cmd_reg_start;
};
/* Frees the BAM transaction memory */
static void free_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	devm_kfree(nandc->dev, bam_txn);
}
/* Allocates and Initializes the BAM transaction */
static struct bam_transaction *
alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn;
	size_t bam_txn_size;
	unsigned int num_cw = nandc->max_cwperpage;
	void *bam_txn_buf;

	bam_txn_size =
		sizeof(*bam_txn) + num_cw *
		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
		(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
		(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));

	bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
	if (!bam_txn_buf)
		return NULL;

	bam_txn = bam_txn_buf;
	bam_txn_buf += sizeof(*bam_txn);

	bam_txn->bam_ce = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;

	bam_txn->cmd_sgl = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;

	bam_txn->data_sgl = bam_txn_buf;

	init_completion(&bam_txn->txn_done);

	return bam_txn;
}
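
/*
 * The single devm_kzalloc() above lays the whole transaction out as one
 * contiguous block:
 *
 *   [struct bam_transaction][bam_ce x (32 * num_cw)]
 *   [cmd_sgl x (32 * num_cw)][data_sgl x (8 * num_cw)]
 *
 * so freeing bam_txn in free_bam_transaction() releases all of the
 * per-codeword arrays at once.
 */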
/* Clears the BAM transaction indexes */
static void clear_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (!nandc->props->is_bam)
		return;

	bam_txn->bam_ce_pos = 0;
	bam_txn->bam_ce_start = 0;
	bam_txn->cmd_sgl_pos = 0;
	bam_txn->cmd_sgl_start = 0;
	bam_txn->tx_sgl_pos = 0;
	bam_txn->tx_sgl_start = 0;
	bam_txn->rx_sgl_pos = 0;
	bam_txn->rx_sgl_start = 0;
	bam_txn->last_data_desc = NULL;
	bam_txn->wait_second_completion = false;

	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_CMD_SGL);
	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_DATA_SGL);

	reinit_completion(&bam_txn->txn_done);
}
/* Callback for DMA descriptor completion */
static void qpic_bam_dma_done(void *data)
{
	struct bam_transaction *bam_txn = data;

	/*
	 * In case of data transfer with NAND, 2 callbacks will be generated.
	 * One for command channel and another one for data channel.
	 * If current transaction has data descriptors
	 * (i.e. wait_second_completion is true), then set this to false
	 * and wait for second DMA descriptor completion.
	 */
	if (bam_txn->wait_second_completion)
		bam_txn->wait_second_completion = false;
	else
		complete(&bam_txn->txn_done);
}
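
/*
 * For a page read, for instance, the command channel and the data (rx)
 * channel each raise this callback once: the first invocation only clears
 * wait_second_completion, and the second one completes txn_done, so
 * submit_descs() wakes up only after both channels have drained.
 */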
static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
	return container_of(chip, struct qcom_nand_host, chip);
}

static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
	return container_of(chip->controller, struct qcom_nand_controller,
			    controller);
}
static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
	return ioread32(nandc->base + offset);
}

static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
			       u32 val)
{
	iowrite32(val, nandc->base + offset);
}
static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
					  bool is_cpu)
{
	if (!nandc->props->is_bam)
		return;

	if (is_cpu)
		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
					MAX_REG_RD *
					sizeof(*nandc->reg_read_buf),
					DMA_FROM_DEVICE);
	else
		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
					   MAX_REG_RD *
					   sizeof(*nandc->reg_read_buf),
					   DMA_FROM_DEVICE);
}
static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
	switch (offset) {
	case NAND_FLASH_CMD:
		return &regs->cmd;
	case NAND_ADDR0:
		return &regs->addr0;
	case NAND_ADDR1:
		return &regs->addr1;
	case NAND_FLASH_CHIP_SELECT:
		return &regs->chip_sel;
	case NAND_EXEC_CMD:
		return &regs->exec;
	case NAND_FLASH_STATUS:
		return &regs->clrflashstatus;
	case NAND_DEV0_CFG0:
		return &regs->cfg0;
	case NAND_DEV0_CFG1:
		return &regs->cfg1;
	case NAND_DEV0_ECC_CFG:
		return &regs->ecc_bch_cfg;
	case NAND_READ_STATUS:
		return &regs->clrreadstatus;
	case NAND_DEV_CMD1:
		return &regs->cmd1;
	case NAND_DEV_CMD1_RESTORE:
		return &regs->orig_cmd1;
	case NAND_DEV_CMD_VLD:
		return &regs->vld;
	case NAND_DEV_CMD_VLD_RESTORE:
		return &regs->orig_vld;
	case NAND_EBI2_ECC_BUF_CFG:
		return &regs->ecc_buf_cfg;
	case NAND_READ_LOCATION_0:
		return &regs->read_location0;
	case NAND_READ_LOCATION_1:
		return &regs->read_location1;
	case NAND_READ_LOCATION_2:
		return &regs->read_location2;
	case NAND_READ_LOCATION_3:
		return &regs->read_location3;
	default:
		return NULL;
	}
}
static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
			  u32 val)
{
	struct nandc_regs *regs = nandc->regs;
	__le32 *reg;

	reg = offset_to_nandc_reg(regs, offset);

	if (reg)
		*reg = cpu_to_le32(val);
}
/* helper to configure address register values */
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (chip->options & NAND_BUSWIDTH_16)
		column >>= 1;

	nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
	nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
}
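
/*
 * For example, page 0x12345 with column 0 yields
 * NAND_ADDR0 = 0x12345 << 16 | 0 = 0x23450000 (truncated to 32 bits) and
 * NAND_ADDR1 = 0x12345 >> 16 & 0xff = 0x01, i.e. the page number is split
 * across the two address registers.
 */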
/*
 * update_rw_regs:	set up read/write register values, these will be
 *			written to the NAND controller registers via DMA
 *
 * @num_cw:		number of steps for the read/write operation
 * @read:		read or write operation
 */
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u32 cmd, cfg0, cfg1, ecc_bch_cfg;

	if (read) {
		if (host->use_ecc)
			cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
		else
			cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
	} else {
		cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
	}

	if (host->use_ecc) {
		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1;
		ecc_bch_cfg = host->ecc_bch_cfg;
	} else {
		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1_raw;
		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	}

	nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
	nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	if (read)
		nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
				   host->cw_data : host->cw_size, 1);
}
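
/*
 * As a concrete example, an ECC read of a 2K page (4 codewords) programs
 * the CW_PER_PAGE field of CFG0 with num_cw - 1 = 3, and the single read
 * location covers host->cw_data (the ECC-protected bytes) of each codeword.
 */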
/*
 * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
 * for BAM. This descriptor will be added in the NAND DMA descriptor queue
 * which will be submitted to DMA engine.
 */
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
				  struct dma_chan *chan,
				  unsigned long flags)
{
	struct desc_info *desc;
	struct scatterlist *sgl;
	unsigned int sgl_cnt;
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	enum dma_transfer_direction dir_eng;
	struct dma_async_tx_descriptor *dma_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	if (chan == nandc->cmd_chan) {
		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else if (chan == nandc->tx_chan) {
		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else {
		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	}

	sg_mark_end(sgl + sgl_cnt - 1);
	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
	if (ret == 0) {
		dev_err(nandc->dev, "failure in mapping desc\n");
		kfree(desc);
		return -ENOMEM;
	}

	desc->sgl_cnt = sgl_cnt;
	desc->bam_sgl = sgl;

	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
					   flags);
	if (!dma_desc) {
		dev_err(nandc->dev, "failure in prep desc\n");
		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
		kfree(desc);
		return -EINVAL;
	}

	desc->dma_desc = dma_desc;

	/* update last data/command descriptor */
	if (chan == nandc->cmd_chan)
		bam_txn->last_cmd_desc = dma_desc;
	else
		bam_txn->last_data_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
}
/*
 * Prepares the command descriptor for BAM DMA which will be used for NAND
 * register reads and writes. The command descriptor requires the command
 * to be formed in command element type so this function uses the command
 * element from bam transaction ce array and fills the same with required
 * data. A single SGL can contain multiple command elements so
 * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
 * after the current command element.
 */
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
				 int reg_off, const void *vaddr,
				 int size, unsigned int flags)
{
	int bam_ce_size;
	int i, ret;
	struct bam_cmd_element *bam_ce_buffer;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

	/* fill the command desc */
	for (i = 0; i < size; i++) {
		if (read)
			bam_prep_ce(&bam_ce_buffer[i],
				    nandc_reg_phys(nandc, reg_off + 4 * i),
				    BAM_READ_COMMAND,
				    reg_buf_dma_addr(nandc,
						     (__le32 *)vaddr + i));
		else
			bam_prep_ce_le32(&bam_ce_buffer[i],
					 nandc_reg_phys(nandc, reg_off + 4 * i),
					 BAM_WRITE_COMMAND,
					 *((__le32 *)vaddr + i));
	}

	bam_txn->bam_ce_pos += size;

	/* use the separate sgl after this command */
	if (flags & NAND_BAM_NEXT_SGL) {
		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
		bam_ce_size = (bam_txn->bam_ce_pos -
				bam_txn->bam_ce_start) *
				sizeof(struct bam_cmd_element);
		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
			   bam_ce_buffer, bam_ce_size);
		bam_txn->cmd_sgl_pos++;
		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

		if (flags & NAND_BAM_NWD) {
			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						     DMA_PREP_FENCE |
						     DMA_PREP_CMD);
			if (ret)
				return ret;
		}
	}

	return 0;
}
/*
 * Prepares the data descriptor for BAM DMA which will be used for NAND
 * data reads and writes.
 */
static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
				  const void *vaddr,
				  int size, unsigned int flags)
{
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (read) {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
			   vaddr, size);
		bam_txn->rx_sgl_pos++;
	} else {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
			   vaddr, size);
		bam_txn->tx_sgl_pos++;

		/*
		 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
		 * is not set, form the DMA descriptor
		 */
		if (!(flags & NAND_BAM_NO_EOT)) {
			ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
						     DMA_PREP_INTERRUPT);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
			     int reg_off, const void *vaddr, int size,
			     bool flow_control)
{
	struct desc_info *desc;
	struct dma_async_tx_descriptor *dma_desc;
	struct scatterlist *sgl;
	struct dma_slave_config slave_conf;
	enum dma_transfer_direction dir_eng;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	sgl = &desc->adm_sgl;

	sg_init_one(sgl, vaddr, size);

	if (read) {
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	} else {
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	}

	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
	if (ret == 0) {
		ret = -ENOMEM;
		goto err;
	}

	memset(&slave_conf, 0x00, sizeof(slave_conf));

	slave_conf.device_fc = flow_control;
	if (read) {
		slave_conf.src_maxburst = 16;
		slave_conf.src_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->data_crci;
	} else {
		slave_conf.dst_maxburst = 16;
		slave_conf.dst_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->cmd_crci;
	}

	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
	if (ret) {
		dev_err(nandc->dev, "failed to configure dma channel\n");
		goto err;
	}

	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
	if (!dma_desc) {
		dev_err(nandc->dev, "failed to prepare desc\n");
		ret = -EINVAL;
		goto err;
	}

	desc->dma_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
err:
	kfree(desc);

	return ret;
}
/*
 * read_reg_dma:	prepares a descriptor to read a given number of
 *			contiguous registers to the reg_read_buf pointer
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to read
 * @flags:		flags to control DMA descriptor preparation
 */
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
			int num_regs, unsigned int flags)
{
	bool flow_control = false;
	void *vaddr;

	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
	nandc->reg_read_pos += num_regs;

	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, first);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
					     num_regs, flags);

	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
		flow_control = true;

	return prep_adm_dma_desc(nandc, true, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}
/*
 * write_reg_dma:	prepares a descriptor to write a given number of
 *			contiguous registers
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to write
 * @flags:		flags to control DMA descriptor preparation
 */
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
			 int num_regs, unsigned int flags)
{
	bool flow_control = false;
	struct nandc_regs *regs = nandc->regs;
	void *vaddr;

	vaddr = offset_to_nandc_reg(regs, first);

	if (first == NAND_ERASED_CW_DETECT_CFG) {
		if (flags & NAND_ERASED_CW_SET)
			vaddr = &regs->erased_cw_detect_cfg_set;
		else
			vaddr = &regs->erased_cw_detect_cfg_clr;
	}

	if (first == NAND_EXEC_CMD)
		flags |= NAND_BAM_NWD;

	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
					     num_regs, flags);

	if (first == NAND_FLASH_CMD)
		flow_control = true;

	return prep_adm_dma_desc(nandc, false, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}
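
/*
 * Note how the dummy NAND_DEV_CMD1_RESTORE/NAND_DEV_CMD_VLD_RESTORE offsets
 * (0xdead and 0xbeef above) never reach the hardware: they only select the
 * saved orig_cmd1/orig_vld copies in struct nandc_regs as the DMA source,
 * after which 'first' is rewritten to the real NAND_DEV_CMD1/NAND_DEV_CMD_VLD
 * address. nandc_param() relies on this to restore both registers after ONFI
 * parameter-page probing.
 */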
/*
 * read_data_dma:	prepares a DMA descriptor to transfer data from the
 *			controller's internal buffer to the buffer 'vaddr'
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to write to
 * @size:		DMA transaction size in bytes
 * @flags:		flags to control DMA descriptor preparation
 */
static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			 const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}

/*
 * write_data_dma:	prepares a DMA descriptor to transfer data from
 *			'vaddr' to the controller's internal buffer
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to read from
 * @size:		DMA transaction size in bytes
 * @flags:		flags to control DMA descriptor preparation
 */
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			  const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}
/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading a NAND page.
 */
static void config_nand_page_read(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}
/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading each codeword in NAND page.
 */
static void
config_nand_cw_read(struct qcom_nand_controller *nandc, bool use_ecc)
{
	if (nandc->props->is_bam)
		write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
			      NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	if (use_ecc) {
		read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
		read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
			     NAND_BAM_NEXT_SGL);
	} else {
		read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
	}
}
/*
 * Helper to prepare dma descriptors to configure registers needed for reading a
 * single codeword in page
 */
static void
config_nand_single_cw_page_read(struct qcom_nand_controller *nandc,
				bool use_ecc)
{
	config_nand_page_read(nandc);
	config_nand_cw_read(nandc, use_ecc);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before writing a NAND page.
 */
static void config_nand_page_write(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
		      NAND_BAM_NEXT_SGL);
}
/*
 * Helper to prepare DMA descriptors for configuring registers
 * before writing each codeword in NAND page.
 */
static void config_nand_cw_write(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}
/*
 * the following functions are used within chip->legacy.cmdfunc() to
 * perform different NAND_CMD_* commands
 */

/* sets up descriptors for NAND_CMD_PARAM */
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/*
	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
	 * in use. we configure the controller to perform a raw read of 512
	 * bytes to read onfi params
	 */
	nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, 0);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	/* configure CMD1 and VLD for ONFI param probing */
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
		      (nandc->vld & ~READ_START_VLD));
	nandc_set_reg(nandc, NAND_DEV_CMD1,
		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
		      | NAND_CMD_PARAM << READ_ADDR);

	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
	nandc_set_read_loc(nandc, 0, 0, 512, 1);

	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);

	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_nand_single_cw_page_read(nandc, false);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count, 0);

	/* restore CMD1 and VLD regs */
	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
/* sets up descriptors for NAND_CMD_ERASE1 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD,
		      OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

	write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
/* sets up descriptors for NAND_CMD_READID */
static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (column == -1)
		return 0;

	nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID);
	nandc_set_reg(nandc, NAND_ADDR0, column);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
		      nandc->props->is_bam ? 0 : DM_EN);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
/* sets up descriptors for NAND_CMD_RESET */
static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
/* helpers to submit/free our list of dma descriptors */
static int submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc;
	dma_cookie_t cookie = 0;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	int r;

	if (nandc->props->is_bam) {
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			if (r)
				return r;
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->tx_chan,
						   DMA_PREP_INTERRUPT);
			if (r)
				return r;
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						   DMA_PREP_CMD);
			if (r)
				return r;
		}
	}

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	if (nandc->props->is_bam) {
		bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
		bam_txn->last_cmd_desc->callback_param = bam_txn;
		if (bam_txn->last_data_desc) {
			bam_txn->last_data_desc->callback = qpic_bam_dma_done;
			bam_txn->last_data_desc->callback_param = bam_txn;
			bam_txn->wait_second_completion = true;
		}

		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);
		dma_async_issue_pending(nandc->cmd_chan);

		if (!wait_for_completion_timeout(&bam_txn->txn_done,
						 QPIC_NAND_COMPLETION_TIMEOUT))
			return -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	}

	return 0;
}
static void free_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc, *n;

	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);

		if (nandc->props->is_bam)
			dma_unmap_sg(nandc->dev, desc->bam_sgl,
				     desc->sgl_cnt, desc->dir);
		else
			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
				     desc->dir);

		kfree(desc);
	}
}
/* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	nandc_read_buffer_sync(nandc, false);
}
static void pre_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc->buf_count = 0;
	nandc->buf_start = 0;
	host->use_ecc = false;
	host->last_command = command;

	clear_read_regs(nandc);

	if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
	    command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
		clear_bam_transaction(nandc);
}
/*
 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
 * privately maintained status byte, this status byte can be read after
 * NAND_CMD_STATUS is called
 */
static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int num_cw;
	int i;

	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < num_cw; i++) {
		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash_status & FS_MPU_ERR)
			host->status &= ~NAND_STATUS_WP;

		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
						 (flash_status &
						  FS_DEVICE_STS_ERR)))
			host->status |= NAND_STATUS_FAIL;
	}
}
static void post_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	switch (command) {
	case NAND_CMD_READID:
		nandc_read_buffer_sync(nandc, true);
		memcpy(nandc->data_buffer, nandc->reg_read_buf,
		       nandc->buf_count);
		break;
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
		parse_erase_write_errors(host, command);
		break;
	default:
		break;
	}
}
/*
 * Implements chip->legacy.cmdfunc. It's only used for a limited set of
 * commands. The rest of the commands wouldn't be called by upper layers.
 * For example, NAND_CMD_READOOB would never be called because we have our own
 * versions of read_oob ops for nand_ecc_ctrl.
 */
static void qcom_nandc_command(struct nand_chip *chip, unsigned int command,
			       int column, int page_addr)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		/* we read the entire page for now */
		WARN_ON(column != 0);

		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true);
		break;

	case NAND_CMD_SEQIN:
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		free_descs(nandc);
		return;
	}

	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	post_command(host, command);
}
/*
 * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
 * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
 *
 * when using RS ECC, the HW reports the same errors when reading an erased CW,
 * but it notifies that it is an erased CW by placing special characters at
 * certain offsets in the buffer.
 *
 * verify if the page is erased or not, and fix up the page for RS ECC by
 * replacing the special characters with 0xff.
 */
static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
{
	u8 empty1, empty2;

	/*
	 * an erased page flags an error in NAND_FLASH_STATUS, check if the page
	 * is erased by looking for 0x54s at offsets 3 and 175 from the
	 * beginning of each codeword
	 */

	empty1 = data_buf[3];
	empty2 = data_buf[175];

	/*
	 * if the erased codeword markers exist, overwrite them with 0xff so
	 * that the whole-chunk check below sees a fully erased buffer
	 */
	if ((empty1 == 0x54 && empty2 == 0xff) ||
	    (empty1 == 0xff && empty2 == 0x54)) {
		data_buf[3] = 0xff;
		data_buf[175] = 0xff;
	}

	/*
	 * check if the entire chunk contains 0xffs or not. if it doesn't, then
	 * restore the original values at the special offsets
	 */
	if (memchr_inv(data_buf, 0xff, data_len)) {
		data_buf[3] = empty1;
		data_buf[175] = empty2;

		return false;
	}

	return true;
}
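
/*
 * For example, a freshly erased codeword read with RS ECC typically comes
 * back as 0xff everywhere except for a 0x54 marker at offset 3 and/or 175.
 * The fixup above rewrites those bytes to 0xff, so the caller sees a fully
 * erased chunk instead of a spurious ECC failure.
 */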
struct read_stats {
	__le32 flash;
	__le32 buffer;
	__le32 erased_cw;
};

/* reads back FLASH_STATUS register set by the controller */
static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int i;

	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < cw_cnt; i++) {
		u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash & (FS_OP_ERR | FS_MPU_ERR))
			return -EIO;
	}

	return 0;
}
/* performs raw read for one codeword */
static int
qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
		       u8 *data_buf, u8 *oob_buf, int page, int cw)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int data_size1, data_size2, oob_size1, oob_size2;
	int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;

	nand_read_page_op(chip, page, 0, NULL, 0);
	host->use_ecc = false;

	clear_bam_transaction(nandc);
	set_address(host, host->cw_size * cw, page);
	update_rw_regs(host, 1, true);
	config_nand_page_read(nandc);

	data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
	oob_size1 = host->bbm_size;

	if (cw == (ecc->steps - 1)) {
		data_size2 = ecc->size - data_size1 -
			     ((ecc->steps - 1) * 4);
		oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
			    host->spare_bytes;
	} else {
		data_size2 = host->cw_data - data_size1;
		oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
	}

	if (nandc->props->is_bam) {
		nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
		read_loc += data_size1;

		nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
		read_loc += oob_size1;

		nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
		read_loc += data_size2;

		nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
	}

	config_nand_cw_read(nandc, false);

	read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
	reg_off += data_size1;

	read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
	reg_off += oob_size1;

	read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
	reg_off += data_size2;

	read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);

	ret = submit_descs(nandc);
	free_descs(nandc);
	if (ret) {
		dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
		return ret;
	}

	return check_flash_errors(host, 1);
}
/*
 * Bitflips can happen in erased codewords also so this function counts the
 * number of 0 in each CW for which ECC engine returns the uncorrectable
 * error. The page will be assumed as erased if this count is less than or
 * equal to the ecc->strength for each CW.
 *
 * 1. Both DATA and OOB need to be checked for number of 0. The
 *    top-level API can be called with only data buf or OOB buf so use
 *    chip->data_buf if data buf is null and chip->oob_poi if oob buf
 *    is null for copying the raw bytes.
 * 2. Perform raw read for all the CW which has uncorrectable errors.
 * 3. For each CW, check the number of 0 in cw_data and usable OOB bytes.
 *    The BBM and spare bytes bit flip won't affect the ECC so don't check
 *    the number of bitflips in this area.
 */
static int
check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
		      u8 *oob_buf, unsigned long uncorrectable_cws,
		      int page, unsigned int max_bitflips)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *cw_data_buf, *cw_oob_buf;
	int cw, data_size, oob_size, ret = 0;

	if (!data_buf)
		data_buf = nand_get_data_buf(chip);

	if (!oob_buf) {
		nand_get_data_buf(chip);
		oob_buf = chip->oob_poi;
	}

	for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
		if (cw == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) * 4);
			oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw;
		}

		/* determine starting buffer address for current CW */
		cw_data_buf = data_buf + (cw * host->cw_data);
		cw_oob_buf = oob_buf + (cw * ecc->bytes);

		ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
					     cw_oob_buf, page, cw);
		if (ret)
			return ret;

		/*
		 * make sure it isn't an erased page reported
		 * as not-erased by HW because of a few bitflips
		 */
		ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
						  cw_oob_buf + host->bbm_size,
						  oob_size, NULL,
						  0, ecc->strength);
		if (ret < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}
	}

	return max_bitflips;
}
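
/*
 * nand_check_erased_ecc_chunk() returns the number of bitflips (zero bits)
 * when the data + OOB region still looks erased within the ecc->strength
 * budget, or -EBADMSG otherwise; that is why a negative return bumps
 * ecc_stats.failed above while a non-negative one is folded into
 * max_bitflips.
 */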
/*
 * reads back status registers set by the controller to notify page read
 * errors. this is equivalent to what 'ecc->correct()' would do.
 */
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0, uncorrectable_cws = 0;
	struct read_stats *buf;
	bool flash_op_err = false, erased;
	int i;
	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;

	buf = (struct read_stats *)nandc->reg_read_buf;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		if (i == (ecc->steps - 1)) {
			data_len = ecc->size - ((ecc->steps - 1) << 2);
			oob_len = ecc->steps << 2;
		} else {
			data_len = host->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->flash);
		buffer = le32_to_cpu(buf->buffer);
		erased_cw = le32_to_cpu(buf->erased_cw);

		/*
		 * Check ECC failure for each codeword. ECC failure can
		 * happen in either of the following conditions
		 * 1. If number of bitflips are greater than ECC engine
		 *    strength.
		 * 2. If this codeword contains all 0xff for which erased
		 *    codeword detection check will be done.
		 */
		if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
			/*
			 * For BCH ECC, ignore erased codeword errors, if
			 * ERASED_CW bits are set.
			 */
			if (host->bch_enabled) {
				erased = (erased_cw & ERASED_CW) == ERASED_CW ?
					 true : false;
			/*
			 * For RS ECC, HW reports the erased CW by placing
			 * special characters at certain offsets in the buffer.
			 * These special characters will be valid only if
			 * complete page is read i.e. data_buf is not NULL.
			 */
			} else if (data_buf) {
				erased = erased_chunk_check_and_fixup(data_buf,
								      data_len);
			} else {
				erased = false;
			}

			if (!erased)
				uncorrectable_cws |= BIT(i);
		/*
		 * Check if MPU or any other operational error (timeout,
		 * device failure, etc.) happened for this codeword and
		 * make flash_op_err true. If flash_op_err is set, then
		 * EIO will be returned for page read.
		 */
		} else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			flash_op_err = true;
		/*
		 * No ECC or operational errors happened. Check the number of
		 * bits corrected and update the ecc_stats.corrected.
		 */
		} else {
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		if (data_buf)
			data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc->bytes;
	}

	if (flash_op_err)
		return -EIO;

	if (!uncorrectable_cws)
		return max_bitflips;

	return check_for_erased_page(host, data_buf_start, oob_buf_start,
				     uncorrectable_cws, page,
				     max_bitflips);
}
/*
 * helper to perform the actual page read operation, used by ecc->read_page(),
 * ecc->read_oob()
 */
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
			 u8 *oob_buf, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
	int i, ret;

	config_nand_page_read(nandc);

	/* queue cmd descs for each codeword */
	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw + host->spare_bytes;
		}

		if (nandc->props->is_bam) {
			if (data_buf && oob_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 0);
				nandc_set_read_loc(nandc, 1, data_size,
						   oob_size, 1);
			} else if (data_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 1);
			} else {
				nandc_set_read_loc(nandc, 0, data_size,
						   oob_size, 1);
			}
		}

		config_nand_cw_read(nandc, true);

		if (data_buf)
			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
				      data_size, 0);

		/*
		 * when ecc is enabled, the controller doesn't read the real
		 * or dummy bad block markers in each chunk. To maintain a
		 * consistent layout across RAW and ECC reads, we just
		 * leave the real/dummy BBM offsets empty (i.e, filled with
		 * 0xff)
		 */
		if (oob_buf) {
			int j;

			for (j = 0; j < host->bbm_size; j++)
				*oob_buf++ = 0xff;

			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
				      oob_buf, oob_size, 0);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to read page/oob\n");
		return ret;
	}

	return parse_read_errors(host, data_buf_start, oob_buf_start, page);
}
/*
 * a helper that copies the last step/codeword of a page (containing free oob)
 * into our local buffer
 */
static int copy_last_cw(struct qcom_nand_host *host, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int size;
	int ret;

	clear_read_regs(nandc);

	size = host->use_ecc ? host->cw_data : host->cw_size;

	/* prepare a clean read buffer */
	memset(nandc->data_buffer, 0xff, size);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, true);

	config_nand_single_cw_page_read(nandc, host->use_ecc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failed to copy last codeword\n");

	free_descs(nandc);

	return ret;
}
/* implements ecc->read_page() */
static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u8 *data_buf, *oob_buf = NULL;

	nand_read_page_op(chip, page, 0, NULL, 0);
	data_buf = buf;
	oob_buf = oob_required ? chip->oob_poi : NULL;

	clear_bam_transaction(nandc);

	return read_page_ecc(host, data_buf, oob_buf, page);
}
/* implements ecc->read_page_raw() */
static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int cw, ret;
	u8 *data_buf = buf, *oob_buf = chip->oob_poi;

	for (cw = 0; cw < ecc->steps; cw++) {
		ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
					     page, cw);
		if (ret)
			return ret;

		data_buf += host->cw_data;
		oob_buf += ecc->bytes;
	}

	return 0;
}
/* implements ecc->read_oob() */
static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	host->use_ecc = true;
	set_address(host, 0, page);
	update_rw_regs(host, ecc->steps, true);

	return read_page_ecc(host, NULL, chip->oob_poi, page);
}
/* implements ecc->write_page() */
static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = true;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = ecc->bytes;
		}

		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
			       i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);

		/*
		 * when ECC is enabled, we don't really need to write anything
		 * to oob for the first n - 1 codewords since these oob regions
		 * just contain ECC bytes that's written by the controller
		 * itself. For the last codeword, we skip the bbm positions and
		 * write to the free oob area.
		 */
		if (i == (ecc->steps - 1)) {
			oob_buf += host->bbm_size;

			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
				       oob_buf, oob_size, 0);
		}

		config_nand_cw_write(nandc);

		data_buf += data_size;
		oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write page\n");

	free_descs(nandc);

	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}
/* implements ecc->write_page_raw() */
static int qcom_nandc_write_page_raw(struct nand_chip *chip,
				     const uint8_t *buf, int oob_required,
				     int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = false;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		if (i == (ecc->steps - 1)) {
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		write_data_dma(nandc, reg_off, data_buf, data_size1,
			       NAND_BAM_NO_EOT);
		reg_off += data_size1;
		data_buf += data_size1;

		write_data_dma(nandc, reg_off, oob_buf, oob_size1,
			       NAND_BAM_NO_EOT);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		write_data_dma(nandc, reg_off, data_buf, data_size2,
			       NAND_BAM_NO_EOT);
		reg_off += data_size2;
		data_buf += data_size2;

		write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
		oob_buf += oob_size2;

		config_nand_cw_write(nandc);
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write raw page\n");

	free_descs(nandc);

	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}
/*
 * implements ecc->write_oob()
 *
 * the NAND controller cannot write only data or only OOB within a codeword
 * since ECC is calculated for the combined codeword. So update the OOB from
 * chip->oob_poi, and pad the data area with 0xFF before writing.
 */
static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *oob = chip->oob_poi;
	int data_size, oob_size;
	int ret;

	host->use_ecc = true;
	clear_bam_transaction(nandc);

	/* calculate the data and oob size for the last codeword/step */
	data_size = ecc->size - ((ecc->steps - 1) << 2);
	oob_size = mtd->oobavail;

	memset(nandc->data_buffer, 0xff, host->cw_data);
	/* override new oob content to last codeword */
	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
				    0, mtd->oobavail);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, data_size + oob_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to write oob\n");
		return -EIO;
	}

	return nand_prog_page_end_op(chip);
}
static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, bbpos, bad = 0;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/*
	 * configure registers for a raw sub page read, the address is set to
	 * the beginning of the last codeword, we don't care about reading ecc
	 * portion of oob. we just want the first few bytes from this codeword
	 * that contains the BBM
	 */
	host->use_ecc = false;

	clear_bam_transaction(nandc);
	ret = copy_last_cw(host, page);
	if (ret)
		goto err;

	if (check_flash_errors(host, 1)) {
		dev_warn(nandc->dev, "error when trying to read BBM\n");
		goto err;
	}

	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);

	bad = nandc->data_buffer[bbpos] != 0xff;

	if (chip->options & NAND_BUSWIDTH_16)
		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
err:
	return bad;
}
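
/*
 * Illustrative numbers for the BBM lookup above (assuming a 2K page,
 * 4-bit ECC, 8-bit bus): host->cw_size = 528 and ecc->steps = 4, so
 * bbpos = 2048 - 528 * 3 = 464, the offset of the bad block marker within
 * the last codeword as copied into nandc->data_buffer by copy_last_cw().
 */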
static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	/*
	 * to mark the BBM as bad, we flash the entire last codeword with 0s.
	 * we don't care about the rest of the content in the codeword since
	 * we aren't going to use this block again
	 */
	memset(nandc->data_buffer, 0x00, host->cw_size);

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/* prepare write */
	host->use_ecc = false;
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, host->cw_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to update BBM\n");
		return -EIO;
	}

	return nand_prog_page_end_op(chip);
}
/*
 * the three functions below implement chip->legacy.read_byte(),
 * chip->legacy.read_buf() and chip->legacy.write_buf() respectively. these
 * aren't used for reading/writing page data, they are used for smaller data
 * like reading id, status etc
 */
static uint8_t qcom_nandc_read_byte(struct nand_chip *chip)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u8 *buf = nandc->data_buffer;
	u8 ret = 0x0;

	if (host->last_command == NAND_CMD_STATUS) {
		ret = host->status;

		host->status = NAND_STATUS_READY | NAND_STATUS_WP;

		return ret;
	}

	if (nandc->buf_start < nandc->buf_count)
		ret = buf[nandc->buf_start++];

	return ret;
}
static void qcom_nandc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);

	memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
	nandc->buf_start += real_len;
}
static void qcom_nandc_write_buf(struct nand_chip *chip, const uint8_t *buf,
				 int len)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);

	memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);

	nandc->buf_start += real_len;
}
/* we support only one external chip for now */
static void qcom_nandc_select_chip(struct nand_chip *chip, int chipnr)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (chipnr <= 0)
		return;

	dev_warn(nandc->dev, "invalid chip select\n");
}
/*
 * NAND controller page layout info
 *
 * Layout with ECC enabled:
 *
 * |----------------------|	|---------------------------------|
 * |           xx.......yy|	|             *********xx.......yy|
 * |    DATA   xx..ECC..yy|	|     DATA    **SPARE**xx..ECC..yy|
 * |   (516)   xx.......yy|	|   (516-n*4) **(n*4)**xx.......yy|
 * |           xx.......yy|	|             *********xx.......yy|
 * |----------------------|	|---------------------------------|
 *     codeword 1,2..n-1		      codeword n
 *  <---(528/532 Bytes)-->	<-------(528/532 Bytes)--------->
 *
 * n = Number of codewords in the page
 * . = ECC bytes
 * * = Spare/free bytes
 * x = Unused byte(s)
 * y = Reserved byte(s)
 *
 * 2K page: n = 4, spare = 16 bytes
 * 4K page: n = 8, spare = 32 bytes
 * 8K page: n = 16, spare = 64 bytes
 *
 * the qcom nand controller operates at a sub page/codeword level. each
 * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
 * the number of ECC bytes varies based on the ECC strength and the bus width.
 *
 * the first n - 1 codewords contain 516 bytes of user data each, the
 * remaining 12/16 bytes consist of ECC and reserved data. The nth codeword
 * contains both user data and spare(oobavail) bytes that sum up to 516 bytes.
 *
 * When we access a page with ECC enabled, the reserved byte(s) are not
 * accessible at all. When reading, we fill up these unreadable positions
 * with 0xffs. When writing, the controller skips writing the inaccessible
 * bytes.
 *
 * Layout with ECC disabled:
 *
 * |------------------------------|  |---------------------------------------|
 * |         yy          xx.......|  |         bb          *********xx.......|
 * |  DATA1  yy  DATA2   xx..ECC..|  |  DATA1  bb  DATA2   **SPARE**xx..ECC..|
 * | (size1) yy (size2)  xx.......|  | (size1) bb (size2)  **(n*4)**xx.......|
 * |         yy          xx.......|  |         bb          *********xx.......|
 * |------------------------------|  |---------------------------------------|
 *         codeword 1,2..n-1			   codeword n
 *  <-------(528/532 Bytes)------>	 <-----------(528/532 Bytes)----------->
 *
 * n = Number of codewords in the page
 * . = ECC bytes
 * * = Spare/free bytes
 * x = Unused byte(s)
 * y = Dummy Bad Block byte(s)
 * b = Real Bad Block byte(s)
 * size1/size2 = function of codeword size and 'n'
 *
 * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
 * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
 * Block Markers. In the last codeword, this position contains the real BBM
 *
 * In order to have a consistent layout between RAW and ECC modes, we assume
 * the following OOB layout arrangement:
 *
 * |-----------|  |--------------------|
 * |yyxx.......|  |bb*********xx.......|
 * |yyxx..ECC..|  |bb*FREEOOB*xx..ECC..|
 * |yyxx.......|  |bb*********xx.......|
 * |yyxx.......|  |bb*********xx.......|
 * |-----------|  |--------------------|
 *  first n - 1       nth OOB region
 *
 * n = Number of codewords in the page
 * . = ECC bytes
 * * = FREE OOB bytes
 * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
 * x = Unused byte(s)
 * b = Real bad block byte(s) (inaccessible when ECC enabled)
 *
 * This layout is read as is when ECC is disabled. When ECC is enabled, the
 * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
 * and assumed as 0xffs when we read a page/oob. The ECC, unused and
 * dummy/real bad block bytes are grouped as ecc bytes (i.e, ecc->bytes is
 * the sum of the three).
 */
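
/*
 * A worked instance of the layout above (illustrative, assuming a 2K page
 * with 64 bytes of OOB, 4-bit BCH ECC, 8-bit bus): n = 4 codewords of 528
 * bytes each, ecc->bytes = 7 (ECC) + 4 (spare) + 1 (BBM) = 12 per codeword,
 * the first three codewords hold 516 user bytes each, and the fourth holds
 * 516 - 12 = 504 user bytes plus the 16 bytes of free OOB.
 */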
static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
				    host->bbm_size;
		oobregion->offset = 0;
	} else {
		oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
		oobregion->offset = mtd->oobsize - oobregion->length;
	}

	return 0;
}
static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section)
		return -ERANGE;

	oobregion->length = ecc->steps * 4;
	oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;

	return 0;
}
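
/*
 * For the same illustrative 2K + 64 byte OOB case (4-bit ECC, 8-bit bus),
 * these callbacks report: ECC section 0 at offset 0 with length
 * 12 * 3 + 1 = 37, the free OOB region at offset 37 with length
 * 4 * 4 = 16, and ECC section 1 with length 7 + 4 = 11 at offset
 * 64 - 11 = 53; the three regions sum to the 64 byte OOB exactly.
 */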
static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
	.ecc = qcom_nand_ooblayout_ecc,
	.free = qcom_nand_ooblayout_free,
};

static int
qcom_nandc_calc_ecc_bytes(int step_size, int strength)
{
	return strength == 4 ? 12 : 16;
}
NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
		     NANDC_STEP_SIZE, 4, 8);
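
/*
 * Example of how the caps resolve (illustrative): 4-bit ECC consumes 12
 * OOB bytes per 512-byte step and 8-bit ECC consumes 16. A 2K page with
 * 64 bytes of OOB leaves 64 - (4 * 4) = 48 bytes for ECC after reserving
 * the 4 protected OOB bytes per codeword, i.e. 12 bytes per step, so only
 * the 4-bit strength fits there.
 */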
static int qcom_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int cwperpage, bad_block_byte, ret;
	bool wide_bus;
	int ecc_mode = 1;

	/* controller only supports 512 bytes data steps */
	ecc->size = NANDC_STEP_SIZE;
	wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
	cwperpage = mtd->writesize / NANDC_STEP_SIZE;

	/*
	 * Each CW has 4 available OOB bytes which will be protected with ECC
	 * so the remaining bytes can be used for ECC.
	 */
	ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
				   mtd->oobsize - (cwperpage * 4));
	if (ret) {
		dev_err(nandc->dev, "No valid ECC settings possible\n");
		return ret;
	}

	if (ecc->strength >= 8) {
		/* 8 bit ECC defaults to BCH ECC on all platforms */
		host->bch_enabled = true;
		ecc_mode = 1;

		if (wide_bus) {
			host->ecc_bytes_hw = 14;
			host->spare_bytes = 0;
			host->bbm_size = 2;
		} else {
			host->ecc_bytes_hw = 13;
			host->spare_bytes = 2;
			host->bbm_size = 1;
		}
	} else {
		/*
		 * if the controller supports BCH for 4 bit ECC, the controller
		 * uses fewer bytes for ECC. If RS is used, the ECC byte count
		 * is always 10
		 */
		if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
			/* BCH */
			host->bch_enabled = true;
			ecc_mode = 0;

			if (wide_bus) {
				host->ecc_bytes_hw = 8;
				host->spare_bytes = 2;
				host->bbm_size = 2;
			} else {
				host->ecc_bytes_hw = 7;
				host->spare_bytes = 4;
				host->bbm_size = 1;
			}
		} else {
			/* RS */
			host->ecc_bytes_hw = 10;

			if (wide_bus) {
				host->spare_bytes = 0;
				host->bbm_size = 2;
			} else {
				host->spare_bytes = 1;
				host->bbm_size = 1;
			}
		}
	}

	/*
	 * we consider ecc->bytes as the sum of all the non-data content in a
	 * step. It gives us a clean representation of the oob area (even if
	 * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
	 * ECC and 12 bytes for 4 bit ECC
	 */
	ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;

	ecc->read_page		= qcom_nandc_read_page;
	ecc->read_page_raw	= qcom_nandc_read_page_raw;
	ecc->read_oob		= qcom_nandc_read_oob;
	ecc->write_page		= qcom_nandc_write_page;
	ecc->write_page_raw	= qcom_nandc_write_page_raw;
	ecc->write_oob		= qcom_nandc_write_oob;

	ecc->mode = NAND_ECC_HW;

	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);

	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
				     cwperpage);

	/*
	 * DATA_UD_BYTES varies based on whether the read/write command protects
	 * spare data with ECC too. We protect spare data by default, so we set
	 * it to main + spare data, which are 512 and 4 bytes respectively.
	 */
	host->cw_data = 516;

	/*
	 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
	 * for 8 bit ECC
	 */
	host->cw_size = host->cw_data + ecc->bytes;
	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;

	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_data << UD_SIZE_BYTES
				| 0 << DISABLE_STATUS_AFTER_WRITE
				| 5 << NUM_ADDR_CYCLES
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
				| 0 << STATUS_BFR_READ
				| 1 << SET_RD_MODE_AFTER_STATUS
				| host->spare_bytes << SPARE_SIZE_BYTES;

	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
				| 0 << CS_ACTIVE_BSY
				| bad_block_byte << BAD_BLOCK_BYTE_NUM
				| 0 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| host->bch_enabled << ENABLE_BCH_ECC;

	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_size << UD_SIZE_BYTES
				| 5 << NUM_ADDR_CYCLES
				| 0 << SPARE_SIZE_BYTES;

	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
				| 0 << CS_ACTIVE_BSY
				| 17 << BAD_BLOCK_BYTE_NUM
				| 1 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| 1 << DEV0_CFG1_ECC_DISABLE;

	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
				| 0 << ECC_SW_RESET
				| host->cw_data << ECC_NUM_DATA_BYTES
				| 1 << ECC_FORCE_CLK_OPEN
				| ecc_mode << ECC_MODE
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;

	host->ecc_buf_cfg = 0x203 << NUM_STEPS;

	host->clrflashstatus = FS_READY_BSY_N;
	host->clrreadstatus = 0xc0;
	nandc->regs->erased_cw_detect_cfg_clr =
		cpu_to_le32(CLR_ERASED_PAGE_DET);
	nandc->regs->erased_cw_detect_cfg_set =
		cpu_to_le32(SET_ERASED_PAGE_DET);

	dev_dbg(nandc->dev,
		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
		host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
		host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
		cwperpage);

	return 0;
}
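
/*
 * Plugging the illustrative 2K page / 4-bit BCH / 8-bit bus numbers into
 * the fields above: ecc->bytes = 7 + 4 + 1 = 12, host->cw_size =
 * 516 + 12 = 528, and bad_block_byte = 2048 - 528 * 3 + 1 = 465, which is
 * the value programmed into the BAD_BLOCK_BYTE_NUM field of cfg1.
 */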
static const struct nand_controller_ops qcom_nandc_ops = {
	.attach_chip = qcom_nand_attach_chip,
};
static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
	int ret;

	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(nandc->dev, "failed to set DMA mask\n");
		return ret;
	}

	/*
	 * we use the internal buffer for reading ONFI params, reading small
	 * data like ID and status, and performing read-copy-write operations
	 * when writing to a codeword partially. 532 is the maximum possible
	 * size of a codeword for our nand controller
	 */
	nandc->buf_size = 532;

	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
					  GFP_KERNEL);
	if (!nandc->data_buffer)
		return -ENOMEM;

	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
				   GFP_KERNEL);
	if (!nandc->regs)
		return -ENOMEM;

	nandc->reg_read_buf = devm_kcalloc(nandc->dev,
					   MAX_REG_RD,
					   sizeof(*nandc->reg_read_buf),
					   GFP_KERNEL);
	if (!nandc->reg_read_buf)
		return -ENOMEM;

	if (nandc->props->is_bam) {
		nandc->reg_read_dma =
			dma_map_single(nandc->dev, nandc->reg_read_buf,
				       MAX_REG_RD *
				       sizeof(*nandc->reg_read_buf),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
			return -EIO;
		}

		nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
		if (!nandc->tx_chan) {
			dev_err(nandc->dev, "failed to request tx channel\n");
			return -ENODEV;
		}

		nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
		if (!nandc->rx_chan) {
			dev_err(nandc->dev, "failed to request rx channel\n");
			return -ENODEV;
		}

		nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
		if (!nandc->cmd_chan) {
			dev_err(nandc->dev, "failed to request cmd channel\n");
			return -ENODEV;
		}

		/*
		 * Initially allocate BAM transaction to read ONFI param page.
		 * After detecting all the devices, this BAM transaction will
		 * be freed and the next BAM transaction will be allocated with
		 * maximum codeword size
		 */
		nandc->max_cwperpage = 1;
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			return -ENOMEM;
		}
	} else {
		nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
		if (!nandc->chan) {
			dev_err(nandc->dev,
				"failed to request slave channel\n");
			return -ENODEV;
		}
	}

	INIT_LIST_HEAD(&nandc->desc_list);
	INIT_LIST_HEAD(&nandc->host_list);

	nand_controller_init(&nandc->controller);
	nandc->controller.ops = &qcom_nandc_ops;

	return 0;
}
static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
	if (nandc->props->is_bam) {
		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
					 MAX_REG_RD *
					 sizeof(*nandc->reg_read_buf),
					 DMA_FROM_DEVICE);

		if (nandc->tx_chan)
			dma_release_channel(nandc->tx_chan);

		if (nandc->rx_chan)
			dma_release_channel(nandc->rx_chan);

		if (nandc->cmd_chan)
			dma_release_channel(nandc->cmd_chan);
	} else {
		if (nandc->chan)
			dma_release_channel(nandc->chan);
	}
}
/* one time setup of a few nand controller registers */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
	u32 nand_ctrl;

	/* kill onenand */
	nandc_write(nandc, SFLASHC_BURST_CFG, 0);
	nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
		    NAND_DEV_CMD_VLD_VAL);

	/* enable ADM or BAM DMA */
	if (nandc->props->is_bam) {
		nand_ctrl = nandc_read(nandc, NAND_CTRL);
		nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
	} else {
		nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
	}

	/* save the original values of these registers */
	nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
	nandc->vld = NAND_DEV_CMD_VLD_VAL;

	return 0;
}
static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
					    struct qcom_nand_host *host,
					    struct device_node *dn)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct device *dev = nandc->dev;
	int ret;

	ret = of_property_read_u32(dn, "reg", &host->cs);
	if (ret) {
		dev_err(dev, "can't get chip-select\n");
		return -ENXIO;
	}

	nand_set_flash_node(chip, dn);
	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
	if (!mtd->name)
		return -ENOMEM;

	mtd->owner = THIS_MODULE;
	mtd->dev.parent = dev;

	chip->legacy.cmdfunc = qcom_nandc_command;
	chip->legacy.select_chip = qcom_nandc_select_chip;
	chip->legacy.read_byte = qcom_nandc_read_byte;
	chip->legacy.read_buf = qcom_nandc_read_buf;
	chip->legacy.write_buf = qcom_nandc_write_buf;
	chip->legacy.set_features = nand_get_set_features_notsupp;
	chip->legacy.get_features = nand_get_set_features_notsupp;

	/*
	 * the bad block marker is readable only when we read the last codeword
	 * of a page with ECC disabled. currently, the nand_base and nand_bbt
	 * helpers don't allow us to read BB from a nand chip with ECC
	 * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
	 * and block_markbad helpers until we permanently switch to using
	 * MTD_OPS_RAW for all drivers (with the help of badblockbits)
	 */
	chip->legacy.block_bad = qcom_nandc_block_bad;
	chip->legacy.block_markbad = qcom_nandc_block_markbad;

	chip->controller = &nandc->controller;
	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
			 NAND_SKIP_BBTSCAN;

	/* set up initial status value */
	host->status = NAND_STATUS_READY | NAND_STATUS_WP;

	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	if (nandc->props->is_bam) {
		free_bam_transaction(nandc);
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			return -ENOMEM;
		}
	}

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		nand_cleanup(chip);

	return ret;
}
static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
{
	struct device *dev = nandc->dev;
	struct device_node *dn = dev->of_node, *child;
	struct qcom_nand_host *host;
	int ret;

	for_each_available_child_of_node(dn, child) {
		host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
		if (!host) {
			of_node_put(child);
			return -ENOMEM;
		}

		ret = qcom_nand_host_init_and_register(nandc, host, child);
		if (ret) {
			devm_kfree(dev, host);
			continue;
		}

		list_add_tail(&host->node, &nandc->host_list);
	}

	if (list_empty(&nandc->host_list))
		return -ENODEV;

	return 0;
}
/* parse custom DT properties here */
static int qcom_nandc_parse_dt(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct device_node *np = nandc->dev->of_node;
	int ret;

	if (!nandc->props->is_bam) {
		ret = of_property_read_u32(np, "qcom,cmd-crci",
					   &nandc->cmd_crci);
		if (ret) {
			dev_err(nandc->dev, "command CRCI unspecified\n");
			return ret;
		}

		ret = of_property_read_u32(np, "qcom,data-crci",
					   &nandc->data_crci);
		if (ret) {
			dev_err(nandc->dev, "data CRCI unspecified\n");
			return ret;
		}
	}

	return 0;
}
static int qcom_nandc_probe(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc;
	const void *dev_data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
	if (!nandc)
		return -ENOMEM;

	platform_set_drvdata(pdev, nandc);
	nandc->dev = dev;

	dev_data = of_device_get_match_data(dev);
	if (!dev_data) {
		dev_err(&pdev->dev, "failed to get device data\n");
		return -ENODEV;
	}

	nandc->props = dev_data;

	nandc->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(nandc->core_clk))
		return PTR_ERR(nandc->core_clk);

	nandc->aon_clk = devm_clk_get(dev, "aon");
	if (IS_ERR(nandc->aon_clk))
		return PTR_ERR(nandc->aon_clk);

	ret = qcom_nandc_parse_dt(pdev);
	if (ret)
		return ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nandc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(nandc->base))
		return PTR_ERR(nandc->base);

	nandc->base_phys = res->start;
	nandc->base_dma = dma_map_resource(dev, res->start,
					   resource_size(res),
					   DMA_BIDIRECTIONAL, 0);
	if (!nandc->base_dma)
		return -ENXIO;

	ret = qcom_nandc_alloc(nandc);
	if (ret)
		goto err_nandc_alloc;

	ret = clk_prepare_enable(nandc->core_clk);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->aon_clk);
	if (ret)
		goto err_aon_clk;

	ret = qcom_nandc_setup(nandc);
	if (ret)
		goto err_setup;

	ret = qcom_probe_nand_devices(nandc);
	if (ret)
		goto err_setup;

	return 0;

err_setup:
	clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
	clk_disable_unprepare(nandc->core_clk);
err_core_clk:
	qcom_nandc_unalloc(nandc);
err_nandc_alloc:
	dma_unmap_resource(dev, res->start, resource_size(res),
			   DMA_BIDIRECTIONAL, 0);

	return ret;
}
static int qcom_nandc_remove(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct qcom_nand_host *host;

	list_for_each_entry(host, &nandc->host_list, node)
		nand_release(&host->chip);

	qcom_nandc_unalloc(nandc);

	clk_disable_unprepare(nandc->aon_clk);
	clk_disable_unprepare(nandc->core_clk);

	dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
			   DMA_BIDIRECTIONAL, 0);

	return 0;
}
static const struct qcom_nandc_props ipq806x_nandc_props = {
	.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
	.is_bam = false,
	.dev_cmd_reg_start = 0x0,
};

static const struct qcom_nandc_props ipq4019_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.dev_cmd_reg_start = 0x0,
};

static const struct qcom_nandc_props ipq8074_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.dev_cmd_reg_start = 0x7000,
};
/*
 * data will hold a struct pointer containing more differences once we support
 * more controller variants
 */
static const struct of_device_id qcom_nandc_of_match[] = {
	{
		.compatible = "qcom,ipq806x-nand",
		.data = &ipq806x_nandc_props,
	},
	{
		.compatible = "qcom,ipq4019-nand",
		.data = &ipq4019_nandc_props,
	},
	{
		.compatible = "qcom,ipq8074-nand",
		.data = &ipq8074_nandc_props,
	},
	{}
};
MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
static struct platform_driver qcom_nandc_driver = {
	.driver = {
		.name = "qcom-nandc",
		.of_match_table = qcom_nandc_of_match,
	},
	.probe   = qcom_nandc_probe,
	.remove  = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);

MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
MODULE_LICENSE("GPL v2");