// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>
/* NANDc reg offsets */
#define	NAND_FLASH_CMD			0x00
#define	NAND_ADDR0			0x04
#define	NAND_ADDR1			0x08
#define	NAND_FLASH_CHIP_SELECT		0x0c
#define	NAND_EXEC_CMD			0x10
#define	NAND_FLASH_STATUS		0x14
#define	NAND_BUFFER_STATUS		0x18
#define	NAND_DEV0_CFG0			0x20
#define	NAND_DEV0_CFG1			0x24
#define	NAND_DEV0_ECC_CFG		0x28
#define	NAND_DEV1_ECC_CFG		0x2c
#define	NAND_DEV1_CFG0			0x30
#define	NAND_DEV1_CFG1			0x34
#define	NAND_READ_ID			0x40
#define	NAND_READ_STATUS		0x44
#define	NAND_DEV_CMD0			0xa0
#define	NAND_DEV_CMD1			0xa4
#define	NAND_DEV_CMD2			0xa8
#define	NAND_DEV_CMD_VLD		0xac
#define	SFLASHC_BURST_CFG		0xe0
#define	NAND_ERASED_CW_DETECT_CFG	0xe8
#define	NAND_ERASED_CW_DETECT_STATUS	0xec
#define	NAND_EBI2_ECC_BUF_CFG		0xf0
#define	FLASH_BUF_ACC			0x100

#define	NAND_CTRL			0xf00
#define	NAND_VERSION			0xf08
#define	NAND_READ_LOCATION_0		0xf20
#define	NAND_READ_LOCATION_1		0xf24
#define	NAND_READ_LOCATION_2		0xf28
#define	NAND_READ_LOCATION_3		0xf2c

/* dummy register offsets, used by write_reg_dma */
#define	NAND_DEV_CMD1_RESTORE		0xdead
#define	NAND_DEV_CMD_VLD_RESTORE	0xbeef
/* NAND_FLASH_CMD bits */
#define	PAGE_ACC			BIT(4)
#define	LAST_PAGE			BIT(5)

/* NAND_FLASH_CHIP_SELECT bits */
#define	NAND_DEV_SEL			0
#define	DM_EN				BIT(2)

/* NAND_FLASH_STATUS bits */
#define	FS_OP_ERR			BIT(4)
#define	FS_READY_BSY_N			BIT(5)
#define	FS_MPU_ERR			BIT(8)
#define	FS_DEVICE_STS_ERR		BIT(16)
#define	FS_DEVICE_WP			BIT(23)

/* NAND_BUFFER_STATUS bits */
#define	BS_UNCORRECTABLE_BIT		BIT(8)
#define	BS_CORRECTABLE_ERR_MSK		0x1f

/* NAND_DEVn_CFG0 bits */
#define	DISABLE_STATUS_AFTER_WRITE	4
#define	CW_PER_PAGE			6
#define	UD_SIZE_BYTES			9
#define	ECC_PARITY_SIZE_BYTES_RS	19
#define	SPARE_SIZE_BYTES		23
#define	NUM_ADDR_CYCLES			27
#define	STATUS_BFR_READ			30
#define	SET_RD_MODE_AFTER_STATUS	31

/* NAND_DEVn_CFG1 bits */
#define	DEV0_CFG1_ECC_DISABLE		0
#define	WIDE_FLASH			1
#define	NAND_RECOVERY_CYCLES		2
#define	CS_ACTIVE_BSY			5
#define	BAD_BLOCK_BYTE_NUM		6
#define	BAD_BLOCK_IN_SPARE_AREA		16
#define	WR_RD_BSY_GAP			17
#define	ENABLE_BCH_ECC			27

/* NAND_DEV0_ECC_CFG bits */
#define	ECC_CFG_ECC_DISABLE		0
#define	ECC_SW_RESET			1
#define	ECC_MODE			4
#define	ECC_PARITY_SIZE_BYTES_BCH	8
#define	ECC_NUM_DATA_BYTES		16
#define	ECC_FORCE_CLK_OPEN		30

/* NAND_DEV_CMD1 bits */
#define	READ_ADDR			0

/* NAND_DEV_CMD_VLD bits */
#define	READ_START_VLD			BIT(0)
#define	READ_STOP_VLD			BIT(1)
#define	WRITE_START_VLD			BIT(2)
#define	ERASE_START_VLD			BIT(3)
#define	SEQ_READ_START_VLD		BIT(4)

/* NAND_EBI2_ECC_BUF_CFG bits */
#define	NUM_STEPS			0

/* NAND_ERASED_CW_DETECT_CFG bits */
#define	ERASED_CW_ECC_MASK		1
#define	AUTO_DETECT_RES			0
#define	MASK_ECC			(1 << ERASED_CW_ECC_MASK)
#define	RESET_ERASED_DET		(1 << AUTO_DETECT_RES)
#define	ACTIVE_ERASED_DET		(0 << AUTO_DETECT_RES)
#define	CLR_ERASED_PAGE_DET		(RESET_ERASED_DET | MASK_ECC)
#define	SET_ERASED_PAGE_DET		(ACTIVE_ERASED_DET | MASK_ECC)

/* NAND_ERASED_CW_DETECT_STATUS bits */
#define	PAGE_ALL_ERASED			BIT(7)
#define	CODEWORD_ALL_ERASED		BIT(6)
#define	PAGE_ERASED			BIT(5)
#define	CODEWORD_ERASED			BIT(4)
#define	ERASED_PAGE			(PAGE_ALL_ERASED | PAGE_ERASED)
#define	ERASED_CW			(CODEWORD_ALL_ERASED | CODEWORD_ERASED)

/* NAND_READ_LOCATION_n bits */
#define READ_LOCATION_OFFSET		0
#define READ_LOCATION_SIZE		16
#define READ_LOCATION_LAST		31

/* Version Mask */
#define	NAND_VERSION_MAJOR_MASK		0xf0000000
#define	NAND_VERSION_MAJOR_SHIFT	28
#define	NAND_VERSION_MINOR_MASK		0x0fff0000
#define	NAND_VERSION_MINOR_SHIFT	16

/* NAND OP_CMDs */
#define	OP_PAGE_READ			0x2
#define	OP_PAGE_READ_WITH_ECC		0x3
#define	OP_PAGE_READ_WITH_ECC_SPARE	0x4
#define	OP_PAGE_READ_ONFI_READ		0x5
#define	OP_PROGRAM_PAGE			0x6
#define	OP_PAGE_PROGRAM_WITH_ECC	0x7
#define	OP_PROGRAM_PAGE_SPARE		0x9
#define	OP_BLOCK_ERASE			0xa
#define	OP_FETCH_ID			0xb
#define	OP_RESET_DEVICE			0xd

/* Default Value for NAND_DEV_CMD_VLD */
#define NAND_DEV_CMD_VLD_VAL		(READ_START_VLD | WRITE_START_VLD | \
					 ERASE_START_VLD | SEQ_READ_START_VLD)

/* NAND_CTRL bits */
#define	BAM_MODE_EN			BIT(0)
/*
 * the NAND controller performs reads/writes with ECC in 516 byte chunks.
 * the driver calls the chunks 'step' or 'codeword' interchangeably
 */
#define NANDC_STEP_SIZE			512

/*
 * the largest page size we support is 8K, this will have 16 steps/codewords
 * of 512 bytes each
 */
#define MAX_NUM_STEPS			(SZ_8K / NANDC_STEP_SIZE)

/* we read at most 3 registers per codeword scan */
#define MAX_REG_RD			(3 * MAX_NUM_STEPS)
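/*
 * Worked example (illustrative, not part of the original source): with
 * SZ_8K = 8192, MAX_NUM_STEPS = 8192 / 512 = 16 codewords, and therefore
 * MAX_REG_RD = 3 * 16 = 48 registers. A 4K-page device uses only 8 of
 * those 16 possible steps per page.
 */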
/* ECC modes supported by the controller */
#define ECC_NONE	BIT(0)
#define ECC_RS_4BIT	BIT(1)
#define ECC_BCH_4BIT	BIT(2)
#define ECC_BCH_8BIT	BIT(3)

#define nandc_set_read_loc(nandc, reg, offset, size, is_last)	\
nandc_set_reg(nandc, NAND_READ_LOCATION_##reg,			\
	      ((offset) << READ_LOCATION_OFFSET) |		\
	      ((size) << READ_LOCATION_SIZE) |			\
	      ((is_last) << READ_LOCATION_LAST))
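/*
 * For illustration (not in the original source): given the bit positions
 * above, nandc_set_read_loc(nandc, 0, 0, 512, 1) expands to
 * nandc_set_reg(nandc, NAND_READ_LOCATION_0,
 *		 (0 << 0) | (512 << 16) | (1 << 31)),
 * i.e. "transfer 512 bytes starting at offset 0; this is the last
 * read location for the codeword".
 */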
/*
 * Returns the actual register address for all NAND_DEV_ registers
 * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
 */
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))

/* Returns the NAND register physical address */
#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))

/* Returns the dma address for reg read buffer */
#define reg_buf_dma_addr(chip, vaddr) \
	((chip)->reg_read_dma + \
	((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
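/*
 * For illustration (not in the original source): if reg_read_buf sits at
 * virtual address V and was mapped to bus address D (reg_read_dma), then
 * for any pointer p inside the buffer,
 * reg_buf_dma_addr(chip, p) == D + (p - V), i.e. the byte offset within
 * the CPU buffer is applied unchanged to the DMA address.
 */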
#define QPIC_PER_CW_CMD_ELEMENTS	32
#define QPIC_PER_CW_CMD_SGL		32
#define QPIC_PER_CW_DATA_SGL		8

#define QPIC_NAND_COMPLETION_TIMEOUT	msecs_to_jiffies(2000)

/*
 * Flags used in DMA descriptor preparation helper functions
 * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
 */
/* Don't set the EOT in current tx BAM sgl */
#define NAND_BAM_NO_EOT			BIT(0)
/* Set the NWD flag in current BAM sgl */
#define NAND_BAM_NWD			BIT(1)
/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
#define NAND_BAM_NEXT_SGL		BIT(2)
/*
 * Erased codeword status is being used two times in single transfer so this
 * flag will determine the current value of erased codeword status register
 */
#define NAND_ERASED_CW_SET		BIT(4)
/*
 * This data type corresponds to the BAM transaction which will be used for all
 * NAND transfers.
 * @bam_ce - the array of BAM command elements
 * @cmd_sgl - sgl for NAND BAM command pipe
 * @data_sgl - sgl for NAND BAM consumer/producer pipe
 * @bam_ce_pos - the index in bam_ce which is available for next sgl
 * @bam_ce_start - the index in bam_ce which marks the start position ce
 *		   for current sgl. It will be used for size calculation
 *		   for current sgl
 * @cmd_sgl_pos - current index in command sgl.
 * @cmd_sgl_start - start index in command sgl.
 * @tx_sgl_pos - current index in data sgl for tx.
 * @tx_sgl_start - start index in data sgl for tx.
 * @rx_sgl_pos - current index in data sgl for rx.
 * @rx_sgl_start - start index in data sgl for rx.
 * @wait_second_completion - wait for second DMA desc completion before making
 *			     the NAND transfer completion.
 * @txn_done - completion for NAND transfer.
 * @last_data_desc - last DMA desc in data channel (tx/rx).
 * @last_cmd_desc - last DMA desc in command channel.
 */
struct bam_transaction {
	struct bam_cmd_element *bam_ce;
	struct scatterlist *cmd_sgl;
	struct scatterlist *data_sgl;
	u32 bam_ce_pos;
	u32 bam_ce_start;
	u32 cmd_sgl_pos;
	u32 cmd_sgl_start;
	u32 tx_sgl_pos;
	u32 tx_sgl_start;
	u32 rx_sgl_pos;
	u32 rx_sgl_start;
	bool wait_second_completion;
	struct completion txn_done;
	struct dma_async_tx_descriptor *last_data_desc;
	struct dma_async_tx_descriptor *last_cmd_desc;
};
/*
 * This data type corresponds to the nand dma descriptor
 * @list - list for desc_info
 * @dir - DMA transfer direction
 * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
 *	      ADM
 * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
 * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
 * @dma_desc - low level DMA engine descriptor
 */
struct desc_info {
	struct list_head node;

	enum dma_data_direction dir;
	union {
		struct scatterlist adm_sgl;
		struct {
			struct scatterlist *bam_sgl;
			int sgl_cnt;
		};
	};
	struct dma_async_tx_descriptor *dma_desc;
};
/*
 * holds the current register values that we want to write. acts as a contiguous
 * chunk of memory which we use to write the controller registers through DMA.
 */
struct nandc_regs {
	__le32 cmd;
	__le32 addr0;
	__le32 addr1;
	__le32 chip_sel;
	__le32 exec;

	__le32 cfg0;
	__le32 cfg1;
	__le32 ecc_bch_cfg;

	__le32 clrflashstatus;
	__le32 clrreadstatus;

	__le32 cmd1;
	__le32 vld;

	__le32 orig_cmd1;
	__le32 orig_vld;

	__le32 ecc_buf_cfg;
	__le32 read_location0;
	__le32 read_location1;
	__le32 read_location2;
	__le32 read_location3;

	__le32 erased_cw_detect_cfg_clr;
	__le32 erased_cw_detect_cfg_set;
};
/*
 * NAND controller data struct
 *
 * @controller:			base controller structure
 * @host_list:			list containing all the chips attached to the
 *				controller
 * @dev:			parent device
 * @base:			MMIO base
 * @base_phys:			physical base address of controller registers
 * @base_dma:			dma base address of controller registers
 * @core_clk:			controller clock
 * @aon_clk:			another controller clock
 *
 * @chan:			dma channel
 * @cmd_crci:			ADM DMA CRCI for command flow control
 * @data_crci:			ADM DMA CRCI for data flow control
 * @desc_list:			DMA descriptor list (list of desc_infos)
 *
 * @data_buffer:		our local DMA buffer for page read/writes,
 *				used when we can't use the buffer provided
 *				by upper layers directly
 * @buf_size/count/start:	markers for chip->legacy.read_buf/write_buf
 *				functions
 * @reg_read_buf:		local buffer for reading back registers via DMA
 * @reg_read_dma:		contains dma address for register read buffer
 * @reg_read_pos:		marker for data read in reg_read_buf
 *
 * @regs:			a contiguous chunk of memory for DMA register
 *				writes. contains the register values to be
 *				written to controller
 * @cmd1/vld:			some fixed controller register values
 * @props:			properties of current NAND controller,
 *				initialized via DT match data
 * @max_cwperpage:		maximum QPIC codewords required. calculated
 *				from all connected NAND devices pagesize
 */
struct qcom_nand_controller {
	struct nand_controller controller;
	struct list_head host_list;

	struct device *dev;

	void __iomem *base;
	phys_addr_t base_phys;
	dma_addr_t base_dma;

	struct clk *core_clk;
	struct clk *aon_clk;

	union {
		/* will be used only by QPIC for BAM DMA */
		struct {
			struct dma_chan *tx_chan;
			struct dma_chan *rx_chan;
			struct dma_chan *cmd_chan;
		};

		/* will be used only by EBI2 for ADM DMA */
		struct {
			struct dma_chan *chan;
			unsigned int cmd_crci;
			unsigned int data_crci;
		};
	};

	struct list_head desc_list;
	struct bam_transaction *bam_txn;

	u8 *data_buffer;
	int buf_size;
	int buf_count;
	int buf_start;
	unsigned int max_cwperpage;

	__le32 *reg_read_buf;
	dma_addr_t reg_read_dma;
	int reg_read_pos;

	struct nandc_regs *regs;

	u32 cmd1, vld;
	const struct qcom_nandc_props *props;
};
/*
 * NAND chip structure
 *
 * @chip:			base NAND chip structure
 * @node:			list node to add itself to host_list in
 *				qcom_nand_controller
 *
 * @cs:				chip select value for this chip
 * @cw_size:			the number of bytes in a single step/codeword
 *				of a page, consisting of all data, ecc, spare
 *				and reserved bytes
 * @cw_data:			the number of bytes within a codeword protected
 *				by ECC
 * @use_ecc:			request the controller to use ECC for the
 *				upcoming read/write
 * @bch_enabled:		flag to tell whether BCH ECC mode is used
 * @ecc_bytes_hw:		ECC bytes used by controller hardware for this
 *				chip
 * @status:			value to be returned if NAND_CMD_STATUS command
 *				is executed
 * @last_command:		keeps track of last command on this chip. used
 *				for reading correct status
 *
 * @cfg0, cfg1, cfg0_raw..:	NANDc register configurations needed for
 *				ecc/non-ecc mode for the current nand flash
 *				device
 */
struct qcom_nand_host {
	struct nand_chip chip;
	struct list_head node;

	int cs;
	int cw_size;
	int cw_data;
	bool use_ecc;
	bool bch_enabled;
	int ecc_bytes_hw;
	int spare_bytes;
	int bbm_size;
	u8 status;
	int last_command;

	u32 cfg0, cfg1;
	u32 cfg0_raw, cfg1_raw;
	u32 ecc_buf_cfg;
	u32 ecc_bch_cfg;
	u32 clrflashstatus;
	u32 clrreadstatus;
};
/*
 * This data type corresponds to the NAND controller properties which vary
 * among different NAND controllers.
 * @ecc_modes - ecc mode for NAND
 * @is_bam - whether NAND controller is using BAM
 * @is_qpic - whether NAND CTRL is part of qpic IP
 * @qpic_v2 - flag to indicate QPIC IP version 2
 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
 */
struct qcom_nandc_props {
	u32 ecc_modes;
	bool is_bam;
	bool is_qpic;
	bool qpic_v2;
	u32 dev_cmd_reg_start;
};
/* Frees the BAM transaction memory */
static void free_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	devm_kfree(nandc->dev, bam_txn);
}
483 /* Allocates and Initializes the BAM transaction */
484 static struct bam_transaction *
485 alloc_bam_transaction(struct qcom_nand_controller *nandc)
487 struct bam_transaction *bam_txn;
488 size_t bam_txn_size;
489 unsigned int num_cw = nandc->max_cwperpage;
490 void *bam_txn_buf;
492 bam_txn_size =
493 sizeof(*bam_txn) + num_cw *
494 ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
495 (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
496 (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
498 bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
499 if (!bam_txn_buf)
500 return NULL;
502 bam_txn = bam_txn_buf;
503 bam_txn_buf += sizeof(*bam_txn);
505 bam_txn->bam_ce = bam_txn_buf;
506 bam_txn_buf +=
507 sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
509 bam_txn->cmd_sgl = bam_txn_buf;
510 bam_txn_buf +=
511 sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
513 bam_txn->data_sgl = bam_txn_buf;
515 init_completion(&bam_txn->txn_done);
517 return bam_txn;
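/*
 * Worked example (illustrative, not part of the original source): for a
 * 4K-page device, max_cwperpage = 8, so the single devm_kzalloc() above
 * packs, back to back, the struct bam_transaction itself followed by
 * 8 * 32 command elements, 8 * 32 command SGL entries and 8 * 8 data SGL
 * entries; the bam_ce, cmd_sgl and data_sgl pointers are then carved out
 * of that one contiguous buffer.
 */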
/* Clears the BAM transaction indexes */
static void clear_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (!nandc->props->is_bam)
		return;

	bam_txn->bam_ce_pos = 0;
	bam_txn->bam_ce_start = 0;
	bam_txn->cmd_sgl_pos = 0;
	bam_txn->cmd_sgl_start = 0;
	bam_txn->tx_sgl_pos = 0;
	bam_txn->tx_sgl_start = 0;
	bam_txn->rx_sgl_pos = 0;
	bam_txn->rx_sgl_start = 0;
	bam_txn->last_data_desc = NULL;
	bam_txn->wait_second_completion = false;

	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_CMD_SGL);
	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_DATA_SGL);

	reinit_completion(&bam_txn->txn_done);
}
/* Callback for DMA descriptor completion */
static void qpic_bam_dma_done(void *data)
{
	struct bam_transaction *bam_txn = data;

	/*
	 * In case of data transfer with NAND, 2 callbacks will be generated.
	 * One for command channel and another one for data channel.
	 * If current transaction has data descriptors
	 * (i.e. wait_second_completion is true), then set this to false
	 * and wait for second DMA descriptor completion.
	 */
	if (bam_txn->wait_second_completion)
		bam_txn->wait_second_completion = false;
	else
		complete(&bam_txn->txn_done);
}
static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
	return container_of(chip, struct qcom_nand_host, chip);
}

static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
	return container_of(chip->controller, struct qcom_nand_controller,
			    controller);
}

static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
	return ioread32(nandc->base + offset);
}

static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
			       u32 val)
{
	iowrite32(val, nandc->base + offset);
}

static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
					  bool is_cpu)
{
	if (!nandc->props->is_bam)
		return;

	if (is_cpu)
		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
					MAX_REG_RD *
					sizeof(*nandc->reg_read_buf),
					DMA_FROM_DEVICE);
	else
		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
					   MAX_REG_RD *
					   sizeof(*nandc->reg_read_buf),
					   DMA_FROM_DEVICE);
}
static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
	switch (offset) {
	case NAND_FLASH_CMD:
		return &regs->cmd;
	case NAND_ADDR0:
		return &regs->addr0;
	case NAND_ADDR1:
		return &regs->addr1;
	case NAND_FLASH_CHIP_SELECT:
		return &regs->chip_sel;
	case NAND_EXEC_CMD:
		return &regs->exec;
	case NAND_FLASH_STATUS:
		return &regs->clrflashstatus;
	case NAND_DEV0_CFG0:
		return &regs->cfg0;
	case NAND_DEV0_CFG1:
		return &regs->cfg1;
	case NAND_DEV0_ECC_CFG:
		return &regs->ecc_bch_cfg;
	case NAND_READ_STATUS:
		return &regs->clrreadstatus;
	case NAND_DEV_CMD1:
		return &regs->cmd1;
	case NAND_DEV_CMD1_RESTORE:
		return &regs->orig_cmd1;
	case NAND_DEV_CMD_VLD:
		return &regs->vld;
	case NAND_DEV_CMD_VLD_RESTORE:
		return &regs->orig_vld;
	case NAND_EBI2_ECC_BUF_CFG:
		return &regs->ecc_buf_cfg;
	case NAND_READ_LOCATION_0:
		return &regs->read_location0;
	case NAND_READ_LOCATION_1:
		return &regs->read_location1;
	case NAND_READ_LOCATION_2:
		return &regs->read_location2;
	case NAND_READ_LOCATION_3:
		return &regs->read_location3;
	default:
		return NULL;
	}
}

static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
			  u32 val)
{
	struct nandc_regs *regs = nandc->regs;
	__le32 *reg;

	reg = offset_to_nandc_reg(regs, offset);

	if (reg)
		*reg = cpu_to_le32(val);
}
/* helper to configure address register values */
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (chip->options & NAND_BUSWIDTH_16)
		column >>= 1;

	nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
	nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
}
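/*
 * Worked example (illustrative, not part of the original source): for
 * page 0x12345 and column 0, ADDR0 = 0x12345 << 16 | 0 = 0x23450000
 * (the low 16 page bits plus the column), and
 * ADDR1 = (0x12345 >> 16) & 0xff = 0x01, the remaining high page bits.
 */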
/*
 * update_rw_regs:	set up read/write register values, these will be
 *			written to the NAND controller registers via DMA
 *
 * @num_cw:		number of steps for the read/write operation
 * @read:		read or write operation
 */
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u32 cmd, cfg0, cfg1, ecc_bch_cfg;

	if (read) {
		if (host->use_ecc)
			cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
		else
			cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
	} else {
		cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
	}

	if (host->use_ecc) {
		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1;
		ecc_bch_cfg = host->ecc_bch_cfg;
	} else {
		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1_raw;
		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	}

	nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
	nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	if (read)
		nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
				   host->cw_data : host->cw_size, 1);
}
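/*
 * For illustration (not in the original source): CW_PER_PAGE is a 3-bit
 * field (mask 7U << CW_PER_PAGE) holding "codewords per page minus one".
 * A full-page operation on a 4K page (8 steps of 512 bytes) programs the
 * field with 7, while a single-codeword operation clears it to 0.
 */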
/*
 * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
 * for BAM. This descriptor will be added in the NAND DMA descriptor queue
 * which will be submitted to DMA engine.
 */
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
				  struct dma_chan *chan,
				  unsigned long flags)
{
	struct desc_info *desc;
	struct scatterlist *sgl;
	unsigned int sgl_cnt;
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	enum dma_transfer_direction dir_eng;
	struct dma_async_tx_descriptor *dma_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	if (chan == nandc->cmd_chan) {
		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else if (chan == nandc->tx_chan) {
		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else {
		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	}

	sg_mark_end(sgl + sgl_cnt - 1);
	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
	if (ret == 0) {
		dev_err(nandc->dev, "failure in mapping desc\n");
		kfree(desc);
		return -ENOMEM;
	}

	desc->sgl_cnt = sgl_cnt;
	desc->bam_sgl = sgl;

	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
					   flags);

	if (!dma_desc) {
		dev_err(nandc->dev, "failure in prep desc\n");
		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
		kfree(desc);
		return -EINVAL;
	}

	desc->dma_desc = dma_desc;

	/* update last data/command descriptor */
	if (chan == nandc->cmd_chan)
		bam_txn->last_cmd_desc = dma_desc;
	else
		bam_txn->last_data_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
}
/*
 * Prepares the command descriptor for BAM DMA which will be used for NAND
 * register reads and writes. The command descriptor requires the command
 * to be formed in command element type so this function uses the command
 * element from bam transaction ce array and fills the same with required
 * data. A single SGL can contain multiple command elements so
 * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
 * after the current command element.
 */
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
				 int reg_off, const void *vaddr,
				 int size, unsigned int flags)
{
	int bam_ce_size;
	int i, ret;
	struct bam_cmd_element *bam_ce_buffer;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

	/* fill the command desc */
	for (i = 0; i < size; i++) {
		if (read)
			bam_prep_ce(&bam_ce_buffer[i],
				    nandc_reg_phys(nandc, reg_off + 4 * i),
				    BAM_READ_COMMAND,
				    reg_buf_dma_addr(nandc,
						     (__le32 *)vaddr + i));
		else
			bam_prep_ce_le32(&bam_ce_buffer[i],
					 nandc_reg_phys(nandc, reg_off + 4 * i),
					 BAM_WRITE_COMMAND,
					 *((__le32 *)vaddr + i));
	}

	bam_txn->bam_ce_pos += size;

	/* use the separate sgl after this command */
	if (flags & NAND_BAM_NEXT_SGL) {
		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
		bam_ce_size = (bam_txn->bam_ce_pos -
				bam_txn->bam_ce_start) *
				sizeof(struct bam_cmd_element);
		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
			   bam_ce_buffer, bam_ce_size);
		bam_txn->cmd_sgl_pos++;
		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

		if (flags & NAND_BAM_NWD) {
			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						     DMA_PREP_FENCE |
						     DMA_PREP_CMD);
			if (ret)
				return ret;
		}
	}

	return 0;
}
/*
 * Prepares the data descriptor for BAM DMA which will be used for NAND
 * data reads and writes.
 */
static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
				  const void *vaddr,
				  int size, unsigned int flags)
{
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (read) {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
			   vaddr, size);
		bam_txn->rx_sgl_pos++;
	} else {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
			   vaddr, size);
		bam_txn->tx_sgl_pos++;

		/*
		 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
		 * is not set, form the DMA descriptor
		 */
		if (!(flags & NAND_BAM_NO_EOT)) {
			ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
						     DMA_PREP_INTERRUPT);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
			     int reg_off, const void *vaddr, int size,
			     bool flow_control)
{
	struct desc_info *desc;
	struct dma_async_tx_descriptor *dma_desc;
	struct scatterlist *sgl;
	struct dma_slave_config slave_conf;
	enum dma_transfer_direction dir_eng;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	sgl = &desc->adm_sgl;

	sg_init_one(sgl, vaddr, size);

	if (read) {
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	} else {
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	}

	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
	if (ret == 0) {
		ret = -ENOMEM;
		goto err;
	}

	memset(&slave_conf, 0x00, sizeof(slave_conf));

	slave_conf.device_fc = flow_control;
	if (read) {
		slave_conf.src_maxburst = 16;
		slave_conf.src_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->data_crci;
	} else {
		slave_conf.dst_maxburst = 16;
		slave_conf.dst_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->cmd_crci;
	}

	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
	if (ret) {
		dev_err(nandc->dev, "failed to configure dma channel\n");
		goto err;
	}

	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
	if (!dma_desc) {
		dev_err(nandc->dev, "failed to prepare desc\n");
		ret = -EINVAL;
		goto err;
	}

	desc->dma_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
err:
	kfree(desc);

	return ret;
}
/*
 * read_reg_dma:	prepares a descriptor to read a given number of
 *			contiguous registers to the reg_read_buf pointer
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to read
 * @flags:		flags to control DMA descriptor preparation
 */
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
			int num_regs, unsigned int flags)
{
	bool flow_control = false;
	void *vaddr;

	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
	nandc->reg_read_pos += num_regs;

	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, first);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
					     num_regs, flags);

	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
		flow_control = true;

	return prep_adm_dma_desc(nandc, true, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}
/*
 * write_reg_dma:	prepares a descriptor to write a given number of
 *			contiguous registers
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to write
 * @flags:		flags to control DMA descriptor preparation
 */
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
			 int num_regs, unsigned int flags)
{
	bool flow_control = false;
	struct nandc_regs *regs = nandc->regs;
	void *vaddr;

	vaddr = offset_to_nandc_reg(regs, first);

	if (first == NAND_ERASED_CW_DETECT_CFG) {
		if (flags & NAND_ERASED_CW_SET)
			vaddr = &regs->erased_cw_detect_cfg_set;
		else
			vaddr = &regs->erased_cw_detect_cfg_clr;
	}

	if (first == NAND_EXEC_CMD)
		flags |= NAND_BAM_NWD;

	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
					     num_regs, flags);

	if (first == NAND_FLASH_CMD)
		flow_control = true;

	return prep_adm_dma_desc(nandc, false, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}
/*
 * read_data_dma:	prepares a DMA descriptor to transfer data from the
 *			controller's internal buffer to the buffer 'vaddr'
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to write to
 * @size:		DMA transaction size in bytes
 * @flags:		flags to control DMA descriptor preparation
 */
static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			 const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}

/*
 * write_data_dma:	prepares a DMA descriptor to transfer data from
 *			'vaddr' to the controller's internal buffer
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to read from
 * @size:		DMA transaction size in bytes
 * @flags:		flags to control DMA descriptor preparation
 */
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			  const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}
/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading a NAND page.
 */
static void config_nand_page_read(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading each codeword in NAND page.
 */
static void
config_nand_cw_read(struct qcom_nand_controller *nandc, bool use_ecc)
{
	if (nandc->props->is_bam)
		write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
			      NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	if (use_ecc) {
		read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
		read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
			     NAND_BAM_NEXT_SGL);
	} else {
		read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
	}
}

/*
 * Helper to prepare DMA descriptors to configure registers needed for reading
 * a single codeword in a page.
 */
static void
config_nand_single_cw_page_read(struct qcom_nand_controller *nandc,
				bool use_ecc)
{
	config_nand_page_read(nandc);
	config_nand_cw_read(nandc, use_ecc);
}

/*
 * Helper to prepare DMA descriptors used to configure registers
 * before writing a NAND page.
 */
static void config_nand_page_write(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
		      NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before writing each codeword in NAND page.
 */
static void config_nand_cw_write(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}
/*
 * the following functions are used within chip->legacy.cmdfunc() to
 * perform different NAND_CMD_* commands
 */

/* sets up descriptors for NAND_CMD_PARAM */
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/*
	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
	 * in use. we configure the controller to perform a raw read of 512
	 * bytes to read onfi params
	 */
	if (nandc->props->qpic_v2)
		nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ_ONFI_READ |
			      PAGE_ACC | LAST_PAGE);
	else
		nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ |
			      PAGE_ACC | LAST_PAGE);

	nandc_set_reg(nandc, NAND_ADDR0, 0);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	/* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
	if (!nandc->props->qpic_v2) {
		nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
			      (nandc->vld & ~READ_START_VLD));
		nandc_set_reg(nandc, NAND_DEV_CMD1,
			      (nandc->cmd1 & ~(0xFF << READ_ADDR))
			      | NAND_CMD_PARAM << READ_ADDR);
	}

	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	if (!nandc->props->qpic_v2) {
		nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
		nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
	}

	nandc_set_read_loc(nandc, 0, 0, 512, 1);

	if (!nandc->props->qpic_v2) {
		write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
		write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
	}

	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_nand_single_cw_page_read(nandc, false);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count, 0);

	/* restore CMD1 and VLD regs */
	if (!nandc->props->qpic_v2) {
		write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
		write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
	}

	return 0;
}
/* sets up descriptors for NAND_CMD_ERASE1 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD,
		      OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

	write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
/* sets up descriptors for NAND_CMD_READID */
static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (column == -1)
		return 0;

	nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID);
	nandc_set_reg(nandc, NAND_ADDR0, column);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
		      nandc->props->is_bam ? 0 : DM_EN);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
/* sets up descriptors for NAND_CMD_RESET */
static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
/* helpers to submit/free our list of dma descriptors */
static int submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc;
	dma_cookie_t cookie = 0;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	int r;

	if (nandc->props->is_bam) {
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			if (r)
				return r;
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->tx_chan,
						   DMA_PREP_INTERRUPT);
			if (r)
				return r;
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						   DMA_PREP_CMD);
			if (r)
				return r;
		}
	}

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	if (nandc->props->is_bam) {
		bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
		bam_txn->last_cmd_desc->callback_param = bam_txn;
		if (bam_txn->last_data_desc) {
			bam_txn->last_data_desc->callback = qpic_bam_dma_done;
			bam_txn->last_data_desc->callback_param = bam_txn;
			bam_txn->wait_second_completion = true;
		}

		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);
		dma_async_issue_pending(nandc->cmd_chan);

		if (!wait_for_completion_timeout(&bam_txn->txn_done,
						 QPIC_NAND_COMPLETION_TIMEOUT))
			return -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	}

	return 0;
}
static void free_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc, *n;

	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);

		if (nandc->props->is_bam)
			dma_unmap_sg(nandc->dev, desc->bam_sgl,
				     desc->sgl_cnt, desc->dir);
		else
			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
				     desc->dir);

		kfree(desc);
	}
}
/* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	nandc_read_buffer_sync(nandc, false);
}
static void pre_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc->buf_count = 0;
	nandc->buf_start = 0;
	host->use_ecc = false;
	host->last_command = command;

	clear_read_regs(nandc);

	if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
	    command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
		clear_bam_transaction(nandc);
}
/*
 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
 * privately maintained status byte, this status byte can be read after
 * NAND_CMD_STATUS is called
 */
static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int num_cw;
	int i;

	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < num_cw; i++) {
		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash_status & FS_MPU_ERR)
			host->status &= ~NAND_STATUS_WP;

		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
						 (flash_status &
						  FS_DEVICE_STS_ERR)))
			host->status |= NAND_STATUS_FAIL;
	}
}
static void post_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	switch (command) {
	case NAND_CMD_READID:
		nandc_read_buffer_sync(nandc, true);
		memcpy(nandc->data_buffer, nandc->reg_read_buf,
		       nandc->buf_count);
		break;
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
		parse_erase_write_errors(host, command);
		break;
	default:
		break;
	}
}
/*
 * Implements chip->legacy.cmdfunc. It's only used for a limited set of
 * commands. The rest of the commands wouldn't be called by upper layers.
 * For example, NAND_CMD_READOOB would never be called because we have our own
 * versions of read_oob ops for nand_ecc_ctrl.
 */
static void qcom_nandc_command(struct nand_chip *chip, unsigned int command,
			       int column, int page_addr)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		/* we read the entire page for now */
		WARN_ON(column != 0);

		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true);
		break;

	case NAND_CMD_SEQIN:
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		free_descs(nandc);
		return;
	}

	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	post_command(host, command);
}
/*
 * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it reads
 * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
 *
 * when using RS ECC, the HW reports the same errors when reading an erased CW,
 * but it notifies that it is an erased CW by placing special characters at
 * certain offsets in the buffer.
 *
 * verify if the page is erased or not, and fix up the page for RS ECC by
 * replacing the special characters with 0xff.
 */
static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
{
	u8 empty1, empty2;

	/*
	 * an erased page flags an error in NAND_FLASH_STATUS, check if the page
	 * is erased by looking for 0x54s at offsets 3 and 175 from the
	 * beginning of each codeword
	 */

	empty1 = data_buf[3];
	empty2 = data_buf[175];

	/*
	 * if the erased codeword markers exist, override them with
	 * 0xffs
	 */
	if ((empty1 == 0x54 && empty2 == 0xff) ||
	    (empty1 == 0xff && empty2 == 0x54)) {
		data_buf[3] = 0xff;
		data_buf[175] = 0xff;
	}

	/*
	 * check if the entire chunk contains 0xffs or not. if it doesn't, then
	 * restore the original values at the special offsets
	 */
	if (memchr_inv(data_buf, 0xff, data_len)) {
		data_buf[3] = empty1;
		data_buf[175] = empty2;

		return false;
	}

	return true;
}
struct read_stats {
	__le32 flash;
	__le32 buffer;
	__le32 erased_cw;
};

/* reads back FLASH_STATUS register set by the controller */
static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int i;

	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < cw_cnt; i++) {
		u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash & (FS_OP_ERR | FS_MPU_ERR))
			return -EIO;
	}

	return 0;
}
/* performs raw read for one codeword */
static int
qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
		       u8 *data_buf, u8 *oob_buf, int page, int cw)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int data_size1, data_size2, oob_size1, oob_size2;
	int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;

	nand_read_page_op(chip, page, 0, NULL, 0);
	host->use_ecc = false;

	clear_bam_transaction(nandc);
	set_address(host, host->cw_size * cw, page);
	update_rw_regs(host, 1, true);
	config_nand_page_read(nandc);

	data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
	oob_size1 = host->bbm_size;

	if (cw == (ecc->steps - 1)) {
		data_size2 = ecc->size - data_size1 -
			     ((ecc->steps - 1) * 4);
		oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
			    host->spare_bytes;
	} else {
		data_size2 = host->cw_data - data_size1;
		oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
	}

	if (nandc->props->is_bam) {
		nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
		read_loc += data_size1;

		nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
		read_loc += oob_size1;

		nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
		read_loc += data_size2;

		nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
	}

	config_nand_cw_read(nandc, false);

	read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
	reg_off += data_size1;

	read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
	reg_off += oob_size1;

	read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
	reg_off += data_size2;

	read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);

	ret = submit_descs(nandc);
	free_descs(nandc);
	if (ret) {
		dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
		return ret;
	}

	return check_flash_errors(host, 1);
}
/*
 * Bitflips can happen in erased codewords also, so this function counts the
 * number of 0s in each CW for which the ECC engine returns an uncorrectable
 * error. The page will be assumed erased if this count is less than or
 * equal to ecc->strength for each CW.
 *
 * 1. Both DATA and OOB need to be checked for the number of 0s. The
 *    top-level API can be called with only data buf or OOB buf so use
 *    chip->data_buf if data buf is null and chip->oob_poi if oob buf
 *    is null for copying the raw bytes.
 * 2. Perform raw read for all the CWs which have uncorrectable errors.
 * 3. For each CW, check the number of 0s in cw_data and usable OOB bytes.
 *    The BBM and spare bytes bit flips won't affect the ECC so don't check
 *    the number of bitflips in this area.
 */
static int
check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
		      u8 *oob_buf, unsigned long uncorrectable_cws,
		      int page, unsigned int max_bitflips)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *cw_data_buf, *cw_oob_buf;
	int cw, data_size, oob_size, ret = 0;

	if (!data_buf)
		data_buf = nand_get_data_buf(chip);

	if (!oob_buf) {
		nand_get_data_buf(chip);
		oob_buf = chip->oob_poi;
	}

	for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
		if (cw == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) * 4);
			oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw;
		}

		/* determine starting buffer address for current CW */
		cw_data_buf = data_buf + (cw * host->cw_data);
		cw_oob_buf = oob_buf + (cw * ecc->bytes);

		ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
					     cw_oob_buf, page, cw);
		if (ret)
			return ret;

		/*
		 * make sure it isn't an erased page reported
		 * as not-erased by HW because of a few bitflips
		 */
		ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
						  cw_oob_buf + host->bbm_size,
						  oob_size, NULL,
						  0, ecc->strength);
		if (ret < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}
	}

	return max_bitflips;
}
/*
 * reads back status registers set by the controller to notify page read
 * errors. this is equivalent to what 'ecc->correct()' would do.
 */
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0, uncorrectable_cws = 0;
	struct read_stats *buf;
	bool flash_op_err = false, erased;
	int i;
	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;

	buf = (struct read_stats *)nandc->reg_read_buf;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		if (i == (ecc->steps - 1)) {
			data_len = ecc->size - ((ecc->steps - 1) << 2);
			oob_len = ecc->steps << 2;
		} else {
			data_len = host->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->flash);
		buffer = le32_to_cpu(buf->buffer);
		erased_cw = le32_to_cpu(buf->erased_cw);

		/*
		 * Check ECC failure for each codeword. ECC failure can
		 * happen in either of the following conditions
		 * 1. If number of bitflips are greater than ECC engine
		 *    capability.
		 * 2. If this codeword contains all 0xff for which erased
		 *    codeword detection check will be done.
		 */
		if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
			/*
			 * For BCH ECC, ignore erased codeword errors, if
			 * ERASED_CW bits are set.
			 */
			if (host->bch_enabled) {
				erased = (erased_cw & ERASED_CW) == ERASED_CW ?
					 true : false;
			/*
			 * For RS ECC, HW reports the erased CW by placing
			 * special characters at certain offsets in the buffer.
			 * These special characters will be valid only if
			 * complete page is read i.e. data_buf is not NULL.
			 */
			} else if (data_buf) {
				erased = erased_chunk_check_and_fixup(data_buf,
								      data_len);
			} else {
				erased = false;
			}

			if (!erased)
				uncorrectable_cws |= BIT(i);
		/*
		 * Check if MPU or any other operational error (timeout,
		 * device failure, etc.) happened for this codeword and
		 * make flash_op_err true. If flash_op_err is set, then
		 * EIO will be returned for page read.
		 */
		} else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			flash_op_err = true;
		/*
		 * No ECC or operational errors happened. Check the number of
		 * bits corrected and update the ecc_stats.corrected.
		 */
		} else {
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		if (data_buf)
			data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc->bytes;
	}

	if (flash_op_err)
		return -EIO;

	if (!uncorrectable_cws)
		return max_bitflips;

	return check_for_erased_page(host, data_buf_start, oob_buf_start,
				     uncorrectable_cws, page,
				     max_bitflips);
}
/*
 * helper to perform the actual page read operation, used by ecc->read_page(),
 * ecc->read_oob()
 */
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
			 u8 *oob_buf, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
	int i, ret;

	config_nand_page_read(nandc);

	/* queue cmd descs for each codeword */
	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw + host->spare_bytes;
		}

		if (nandc->props->is_bam) {
			if (data_buf && oob_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 0);
				nandc_set_read_loc(nandc, 1, data_size,
						   oob_size, 1);
			} else if (data_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 1);
			} else {
				nandc_set_read_loc(nandc, 0, data_size,
						   oob_size, 1);
			}
		}

		config_nand_cw_read(nandc, true);

		if (data_buf)
			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
				      data_size, 0);

		/*
		 * when ecc is enabled, the controller doesn't read the real
		 * or dummy bad block markers in each chunk. To maintain a
		 * consistent layout across RAW and ECC reads, we just
		 * leave the real/dummy BBM offsets empty (i.e, filled with
		 * 0xffs)
		 */
		if (oob_buf) {
			int j;

			for (j = 0; j < host->bbm_size; j++)
				*oob_buf++ = 0xff;

			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
				      oob_buf, oob_size, 0);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to read page/oob\n");
		return ret;
	}

	return parse_read_errors(host, data_buf_start, oob_buf_start, page);
}
/*
 * a helper that copies the last step/codeword of a page (containing free oob)
 * into our local buffer
 */
static int copy_last_cw(struct qcom_nand_host *host, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int size;
	int ret;

	clear_read_regs(nandc);

	size = host->use_ecc ? host->cw_data : host->cw_size;

	/* prepare a clean read buffer */
	memset(nandc->data_buffer, 0xff, size);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, true);

	config_nand_single_cw_page_read(nandc, host->use_ecc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failed to copy last codeword\n");

	free_descs(nandc);

	return ret;
}
1962 /* implements ecc->read_page() */
1963 static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
1964 int oob_required, int page)
1966 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1967 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1968 u8 *data_buf, *oob_buf = NULL;
1970 nand_read_page_op(chip, page, 0, NULL, 0);
1971 data_buf = buf;
1972 oob_buf = oob_required ? chip->oob_poi : NULL;
1974 clear_bam_transaction(nandc);
1976 return read_page_ecc(host, data_buf, oob_buf, page);
1977 }
1979 /* implements ecc->read_page_raw() */
1980 static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
1981 int oob_required, int page)
1982 {
1983 struct mtd_info *mtd = nand_to_mtd(chip);
1984 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1985 struct nand_ecc_ctrl *ecc = &chip->ecc;
1986 int cw, ret;
1987 u8 *data_buf = buf, *oob_buf = chip->oob_poi;
1989 for (cw = 0; cw < ecc->steps; cw++) {
1990 ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
1991 page, cw);
1992 if (ret)
1993 return ret;
1995 data_buf += host->cw_data;
1996 oob_buf += ecc->bytes;
1997 }
1999 return 0;
2000 }
2002 /* implements ecc->read_oob() */
2003 static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
2004 {
2005 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2006 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2007 struct nand_ecc_ctrl *ecc = &chip->ecc;
2009 clear_read_regs(nandc);
2010 clear_bam_transaction(nandc);
2012 host->use_ecc = true;
2013 set_address(host, 0, page);
2014 update_rw_regs(host, ecc->steps, true);
2016 return read_page_ecc(host, NULL, chip->oob_poi, page);
2017 }
2019 /* implements ecc->write_page() */
2020 static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
2021 int oob_required, int page)
2022 {
2023 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2024 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2025 struct nand_ecc_ctrl *ecc = &chip->ecc;
2026 u8 *data_buf, *oob_buf;
2027 int i, ret;
2029 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
2031 clear_read_regs(nandc);
2032 clear_bam_transaction(nandc);
2034 data_buf = (u8 *)buf;
2035 oob_buf = chip->oob_poi;
2037 host->use_ecc = true;
2038 update_rw_regs(host, ecc->steps, false);
2039 config_nand_page_write(nandc);
2041 for (i = 0; i < ecc->steps; i++) {
2042 int data_size, oob_size;
2044 if (i == (ecc->steps - 1)) {
2045 data_size = ecc->size - ((ecc->steps - 1) << 2);
2046 oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
2047 host->spare_bytes;
2048 } else {
2049 data_size = host->cw_data;
2050 oob_size = ecc->bytes;
2051 }
2054 write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
2055 i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
2057 /*
2058 * when ECC is enabled, we don't really need to write anything
2059 * to oob for the first n - 1 codewords since these oob regions
2060 * just contain ECC bytes that are written by the controller
2061 * itself. For the last codeword, we skip the bbm positions and
2062 * write to the free oob area.
2063 */
2064 if (i == (ecc->steps - 1)) {
2065 oob_buf += host->bbm_size;
2067 write_data_dma(nandc, FLASH_BUF_ACC + data_size,
2068 oob_buf, oob_size, 0);
2069 }
2071 config_nand_cw_write(nandc);
2073 data_buf += data_size;
2074 oob_buf += oob_size;
2075 }
2077 ret = submit_descs(nandc);
2078 if (ret)
2079 dev_err(nandc->dev, "failure to write page\n");
2081 free_descs(nandc);
2083 if (!ret)
2084 ret = nand_prog_page_end_op(chip);
2086 return ret;
2087 }
2089 /* implements ecc->write_page_raw() */
2090 static int qcom_nandc_write_page_raw(struct nand_chip *chip,
2091 const uint8_t *buf, int oob_required,
2092 int page)
2093 {
2094 struct mtd_info *mtd = nand_to_mtd(chip);
2095 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2096 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2097 struct nand_ecc_ctrl *ecc = &chip->ecc;
2098 u8 *data_buf, *oob_buf;
2099 int i, ret;
2101 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
2102 clear_read_regs(nandc);
2103 clear_bam_transaction(nandc);
2105 data_buf = (u8 *)buf;
2106 oob_buf = chip->oob_poi;
2108 host->use_ecc = false;
2109 update_rw_regs(host, ecc->steps, false);
2110 config_nand_page_write(nandc);
2112 for (i = 0; i < ecc->steps; i++) {
2113 int data_size1, data_size2, oob_size1, oob_size2;
2114 int reg_off = FLASH_BUF_ACC;
2116 data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
2117 oob_size1 = host->bbm_size;
2119 if (i == (ecc->steps - 1)) {
2120 data_size2 = ecc->size - data_size1 -
2121 ((ecc->steps - 1) << 2);
2122 oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
2123 host->spare_bytes;
2124 } else {
2125 data_size2 = host->cw_data - data_size1;
2126 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
2127 }
2129 write_data_dma(nandc, reg_off, data_buf, data_size1,
2130 NAND_BAM_NO_EOT);
2131 reg_off += data_size1;
2132 data_buf += data_size1;
2134 write_data_dma(nandc, reg_off, oob_buf, oob_size1,
2135 NAND_BAM_NO_EOT);
2136 reg_off += oob_size1;
2137 oob_buf += oob_size1;
2139 write_data_dma(nandc, reg_off, data_buf, data_size2,
2140 NAND_BAM_NO_EOT);
2141 reg_off += data_size2;
2142 data_buf += data_size2;
2144 write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
2145 oob_buf += oob_size2;
2147 config_nand_cw_write(nandc);
2148 }
2150 ret = submit_descs(nandc);
2151 if (ret)
2152 dev_err(nandc->dev, "failure to write raw page\n");
2154 free_descs(nandc);
2156 if (!ret)
2157 ret = nand_prog_page_end_op(chip);
2159 return ret;
2160 }
2162 /*
2163 * implements ecc->write_oob()
2164 *
2165 * the NAND controller cannot write only data or only OOB within a codeword
2166 * since ECC is calculated for the combined codeword. So update the OOB from
2167 * chip->oob_poi, and pad the data area with 0xFF before writing.
2168 */
2169 static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
2170 {
2171 struct mtd_info *mtd = nand_to_mtd(chip);
2172 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2173 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2174 struct nand_ecc_ctrl *ecc = &chip->ecc;
2175 u8 *oob = chip->oob_poi;
2176 int data_size, oob_size;
2177 int ret;
2179 host->use_ecc = true;
2180 clear_bam_transaction(nandc);
2182 /* calculate the data and oob size for the last codeword/step */
2183 data_size = ecc->size - ((ecc->steps - 1) << 2);
2184 oob_size = mtd->oobavail;
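/*
 * Worked example (illustrative): with 4 codewords per page, data_size
 * is 516 - (3 << 2) = 504 and oob_size is the 16 free oob bytes, so
 * the new oob content lands at data_buffer[504..519] while the rest of
 * the data area stays padded with 0xff.
 */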
2186 memset(nandc->data_buffer, 0xff, host->cw_data);
2187 /* override new oob content to last codeword */
2188 mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
2189 0, mtd->oobavail);
2191 set_address(host, host->cw_size * (ecc->steps - 1), page);
2192 update_rw_regs(host, 1, false);
2194 config_nand_page_write(nandc);
2195 write_data_dma(nandc, FLASH_BUF_ACC,
2196 nandc->data_buffer, data_size + oob_size, 0);
2197 config_nand_cw_write(nandc);
2199 ret = submit_descs(nandc);
2201 free_descs(nandc);
2203 if (ret) {
2204 dev_err(nandc->dev, "failure to write oob\n");
2205 return -EIO;
2206 }
2208 return nand_prog_page_end_op(chip);
2209 }
2211 static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
2212 {
2213 struct mtd_info *mtd = nand_to_mtd(chip);
2214 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2215 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2216 struct nand_ecc_ctrl *ecc = &chip->ecc;
2217 int page, ret, bbpos, bad = 0;
2219 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2221 /*
2222 * configure registers for a raw sub page read. the address is set to
2223 * the beginning of the last codeword, and we don't care about reading
2224 * the ecc portion of oob. we just want the first few bytes from this
2225 * codeword that contain the BBM
2226 */
2227 host->use_ecc = false;
2229 clear_bam_transaction(nandc);
2230 ret = copy_last_cw(host, page);
2231 if (ret)
2232 goto err;
2234 if (check_flash_errors(host, 1)) {
2235 dev_warn(nandc->dev, "error when trying to read BBM\n");
2236 goto err;
2237 }
2239 bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
2241 bad = nandc->data_buffer[bbpos] != 0xff;
2243 if (chip->options & NAND_BUSWIDTH_16)
2244 bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
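/*
 * Worked example (illustrative): for a 2K page with 528-byte
 * codewords, bbpos = 2048 - 528 * 3 = 464, i.e. the first byte past
 * the data portion of the last raw codeword; a second byte is checked
 * on a 16-bit bus.
 */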
2245 err:
2246 return bad;
2247 }
2249 static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
2250 {
2251 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2252 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2253 struct nand_ecc_ctrl *ecc = &chip->ecc;
2254 int page, ret;
2256 clear_read_regs(nandc);
2257 clear_bam_transaction(nandc);
2259 /*
2260 * to mark the BBM as bad, we flash the entire last codeword with 0s.
2261 * we don't care about the rest of the content in the codeword since
2262 * we aren't going to use this block again
2263 */
2264 memset(nandc->data_buffer, 0x00, host->cw_size);
2266 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2268 /* prepare write */
2269 host->use_ecc = false;
2270 set_address(host, host->cw_size * (ecc->steps - 1), page);
2271 update_rw_regs(host, 1, false);
2273 config_nand_page_write(nandc);
2274 write_data_dma(nandc, FLASH_BUF_ACC,
2275 nandc->data_buffer, host->cw_size, 0);
2276 config_nand_cw_write(nandc);
2278 ret = submit_descs(nandc);
2280 free_descs(nandc);
2282 if (ret) {
2283 dev_err(nandc->dev, "failure to update BBM\n");
2284 return -EIO;
2285 }
2287 return nand_prog_page_end_op(chip);
2288 }
2290 /*
2291 * the three functions below implement chip->legacy.read_byte(),
2292 * chip->legacy.read_buf() and chip->legacy.write_buf() respectively. these
2293 * aren't used for reading/writing page data, they are used for smaller data
2294 * like reading id, status etc.
2295 */
2296 static uint8_t qcom_nandc_read_byte(struct nand_chip *chip)
2297 {
2298 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2299 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2300 u8 *buf = nandc->data_buffer;
2301 u8 ret = 0x0;
2303 if (host->last_command == NAND_CMD_STATUS) {
2304 ret = host->status;
2306 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2308 return ret;
2309 }
2311 if (nandc->buf_start < nandc->buf_count)
2312 ret = buf[nandc->buf_start++];
2314 return ret;
2315 }
2317 static void qcom_nandc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
2318 {
2319 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2320 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2322 memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
2323 nandc->buf_start += real_len;
2324 }
2326 static void qcom_nandc_write_buf(struct nand_chip *chip, const uint8_t *buf,
2327 int len)
2328 {
2329 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2330 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2332 memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
2334 nandc->buf_start += real_len;
2335 }
2337 /* we support only one external chip for now */
2338 static void qcom_nandc_select_chip(struct nand_chip *chip, int chipnr)
2339 {
2340 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2342 if (chipnr <= 0)
2343 return;
2345 dev_warn(nandc->dev, "invalid chip select\n");
2346 }
2348 /*
2349 * NAND controller page layout info
2350 *
2351 * Layout with ECC enabled:
2352 *
2353 * |----------------------| |---------------------------------|
2354 * | xx.......yy| | *********xx.......yy|
2355 * | DATA xx..ECC..yy| | DATA **SPARE**xx..ECC..yy|
2356 * | (516) xx.......yy| | (516-n*4) **(n*4)**xx.......yy|
2357 * | xx.......yy| | *********xx.......yy|
2358 * |----------------------| |---------------------------------|
2359 * codeword 1,2..n-1 codeword n
2360 * <---(528/532 Bytes)--> <-------(528/532 Bytes)--------->
2362 * n = Number of codewords in the page
2363 * . = ECC bytes
2364 * * = Spare/free bytes
2365 * x = Unused byte(s)
2366 * y = Reserved byte(s)
2368 * 2K page: n = 4, spare = 16 bytes
2369 * 4K page: n = 8, spare = 32 bytes
2370 * 8K page: n = 16, spare = 64 bytes
2372 * the qcom nand controller operates at a sub page/codeword level. each
2373 * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
2374 * the number of ECC bytes vary based on the ECC strength and the bus width.
2376 * the first n - 1 codewords contain 516 bytes of user data; the remaining
2377 * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
2378 * both user data and spare (oobavail) bytes that sum up to 516 bytes.
2380 * When we access a page with ECC enabled, the reserved bytes(s) are not
2381 * accessible at all. When reading, we fill up these unreadable positions
2382 * with 0xffs. When writing, the controller skips writing the inaccessible
2383 * bytes.
2385 * Layout with ECC disabled:
2387 * |------------------------------| |---------------------------------------|
2388 * | yy xx.......| | bb *********xx.......|
2389 * | DATA1 yy DATA2 xx..ECC..| | DATA1 bb DATA2 **SPARE**xx..ECC..|
2390 * | (size1) yy (size2) xx.......| | (size1) bb (size2) **(n*4)**xx.......|
2391 * | yy xx.......| | bb *********xx.......|
2392 * |------------------------------| |---------------------------------------|
2393 * codeword 1,2..n-1 codeword n
2394 * <-------(528/532 Bytes)------> <-----------(528/532 Bytes)----------->
2396 * n = Number of codewords in the page
2397 * . = ECC bytes
2398 * * = Spare/free bytes
2399 * x = Unused byte(s)
2400 * y = Dummy Bad Block byte(s)
2401 * b = Real Bad Block byte(s)
2402 * size1/size2 = function of codeword size and 'n'
2404 * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
2405 * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
2406 * Block Markers. In the last codeword, this position contains the real BBM
2408 * In order to have a consistent layout between RAW and ECC modes, we assume
2409 * the following OOB layout arrangement:
2411 * |-----------| |--------------------|
2412 * |yyxx.......| |bb*********xx.......|
2413 * |yyxx..ECC..| |bb*FREEOOB*xx..ECC..|
2414 * |yyxx.......| |bb*********xx.......|
2415 * |yyxx.......| |bb*********xx.......|
2416 * |-----------| |--------------------|
2417 * first n - 1 nth OOB region
2418 * OOB regions
2420 * n = Number of codewords in the page
2421 * . = ECC bytes
2422 * * = FREE OOB bytes
2423 * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
2424 * x = Unused byte(s)
2425 * b = Real bad block byte(s) (inaccessible when ECC enabled)
2427 * This layout is read as is when ECC is disabled. When ECC is enabled, the
2428 * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
2429 * and assumed as 0xffs when we read a page/oob. The ECC, unused and
2430 * dummy/real bad block bytes are grouped as ecc bytes (i.e, ecc->bytes is
2431 * the sum of the three).
2432 */
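/*
 * Worked example (illustrative): a 2K x8 page with 4 bit BCH gives
 * n = 4 codewords of 528 bytes each. ecc->bytes is then
 * 7 (ECC) + 4 (spare) + 1 (BBM) = 12 per step, the first three
 * codewords carry 516 data bytes each, and the last carries
 * 516 - 4 * 4 = 500 data bytes plus the 16 free oob bytes.
 */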
2433 static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2434 struct mtd_oob_region *oobregion)
2435 {
2436 struct nand_chip *chip = mtd_to_nand(mtd);
2437 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2438 struct nand_ecc_ctrl *ecc = &chip->ecc;
2440 if (section > 1)
2441 return -ERANGE;
2443 if (!section) {
2444 oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
2445 host->bbm_size;
2446 oobregion->offset = 0;
2447 } else {
2448 oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
2449 oobregion->offset = mtd->oobsize - oobregion->length;
2450 }
2452 return 0;
2453 }
2455 static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
2456 struct mtd_oob_region *oobregion)
2457 {
2458 struct nand_chip *chip = mtd_to_nand(mtd);
2459 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2460 struct nand_ecc_ctrl *ecc = &chip->ecc;
2462 if (section)
2463 return -ERANGE;
2465 oobregion->length = ecc->steps * 4;
2466 oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
2468 return 0;
2469 }
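/*
 * Sketch (illustrative only, not compiled): the regions the two
 * callbacks above produce, assuming a typical 2K x8 page with 64 oob
 * bytes and 4 bit BCH (ecc->bytes = 12, ecc->steps = 4, bbm_size = 1).
 */
#if 0
static void example_oob_regions(void)
{
	/* ecc section 0: bytes 0..36 (3 * 12 grouped ecc bytes + 1 BBM) */
	struct mtd_oob_region ecc0 = { .offset = 0, .length = 37 };
	/* free oob: bytes 37..52 (4 codewords * 4 bytes) */
	struct mtd_oob_region free0 = { .offset = 37, .length = 16 };
	/* ecc section 1: bytes 53..63 (7 ECC + 4 spare of the last cw) */
	struct mtd_oob_region ecc1 = { .offset = 53, .length = 11 };
}
#endif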
2471 static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
2472 .ecc = qcom_nand_ooblayout_ecc,
2473 .free = qcom_nand_ooblayout_free,
2474 };
2476 static int
2477 qcom_nandc_calc_ecc_bytes(int step_size, int strength)
2478 {
2479 return strength == 4 ? 12 : 16;
2480 }
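/*
 * i.e. nand_ecc_choose_conf() ends up with 12 oob bytes per 512-byte
 * step for 4 bit strength and 16 bytes for 8 bit strength, matching
 * the 528/532-byte codewords described in the layout comment above.
 */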
2481 NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
2482 NANDC_STEP_SIZE, 4, 8);
2484 static int qcom_nand_attach_chip(struct nand_chip *chip)
2485 {
2486 struct mtd_info *mtd = nand_to_mtd(chip);
2487 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2488 struct nand_ecc_ctrl *ecc = &chip->ecc;
2489 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2490 int cwperpage, bad_block_byte, ret;
2491 bool wide_bus;
2492 int ecc_mode = 1;
2494 /* controller only supports 512 bytes data steps */
2495 ecc->size = NANDC_STEP_SIZE;
2496 wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
2497 cwperpage = mtd->writesize / NANDC_STEP_SIZE;
2499 /*
2500 * Each CW has 4 available OOB bytes which will be protected with ECC,
2501 * so only the remaining OOB bytes can be used for ECC parity.
2502 */
2503 ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
2504 mtd->oobsize - (cwperpage * 4));
2505 if (ret) {
2506 dev_err(nandc->dev, "No valid ECC settings possible\n");
2507 return ret;
2508 }
2510 if (ecc->strength >= 8) {
2511 /* 8 bit ECC defaults to BCH ECC on all platforms */
2512 host->bch_enabled = true;
2513 ecc_mode = 1;
2515 if (wide_bus) {
2516 host->ecc_bytes_hw = 14;
2517 host->spare_bytes = 0;
2518 host->bbm_size = 2;
2519 } else {
2520 host->ecc_bytes_hw = 13;
2521 host->spare_bytes = 2;
2522 host->bbm_size = 1;
2523 }
2524 } else {
2525 /*
2526 * if the controller supports BCH for 4 bit ECC, the controller
2527 * uses fewer bytes for ECC. If RS is used, the ECC is
2528 * always 10 bytes
2529 */
2530 if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
2531 /* BCH */
2532 host->bch_enabled = true;
2533 ecc_mode = 0;
2535 if (wide_bus) {
2536 host->ecc_bytes_hw = 8;
2537 host->spare_bytes = 2;
2538 host->bbm_size = 2;
2539 } else {
2540 host->ecc_bytes_hw = 7;
2541 host->spare_bytes = 4;
2542 host->bbm_size = 1;
2543 }
2544 } else {
2545 /* RS */
2546 host->ecc_bytes_hw = 10;
2548 if (wide_bus) {
2549 host->spare_bytes = 0;
2550 host->bbm_size = 2;
2551 } else {
2552 host->spare_bytes = 1;
2553 host->bbm_size = 1;
2554 }
2555 }
2556 }
2558 /*
2559 * we consider ecc->bytes as the sum of all the non-data content in a
2560 * step. It gives us a clean representation of the oob area (even if
2561 * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
2562 * ECC and 12 bytes for 4 bit ECC
2563 */
2564 ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
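/*
 * Worked example (illustrative): 8 bit BCH on an x8 bus gives
 * 13 + 2 + 1 = 16 bytes (532-byte codeword); 4 bit BCH on x8 gives
 * 7 + 4 + 1 = 12 bytes (528-byte codeword).
 */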
2566 ecc->read_page = qcom_nandc_read_page;
2567 ecc->read_page_raw = qcom_nandc_read_page_raw;
2568 ecc->read_oob = qcom_nandc_read_oob;
2569 ecc->write_page = qcom_nandc_write_page;
2570 ecc->write_page_raw = qcom_nandc_write_page_raw;
2571 ecc->write_oob = qcom_nandc_write_oob;
2573 ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2575 mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
2577 nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
2578 cwperpage);
2580 /*
2581 * DATA_UD_BYTES varies based on whether the read/write command protects
2582 * spare data with ECC too. We protect spare data by default, so we set
2583 * it to main + spare data, which are 512 and 4 bytes respectively.
2584 */
2585 host->cw_data = 516;
2587 /*
2588 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
2589 * for 8 bit ECC
2590 */
2591 host->cw_size = host->cw_data + ecc->bytes;
2592 bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
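/*
 * Worked example (illustrative): for a 2K page with 528-byte
 * codewords, bad_block_byte = 2048 - 528 * 3 + 1 = 465. The + 1
 * relative to the bbpos used in qcom_nandc_block_bad() suggests the
 * BAD_BLOCK_BYTE_NUM field is 1-indexed (assumption).
 */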
2594 host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
2595 | host->cw_data << UD_SIZE_BYTES
2596 | 0 << DISABLE_STATUS_AFTER_WRITE
2597 | 5 << NUM_ADDR_CYCLES
2598 | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
2599 | 0 << STATUS_BFR_READ
2600 | 1 << SET_RD_MODE_AFTER_STATUS
2601 | host->spare_bytes << SPARE_SIZE_BYTES;
2603 host->cfg1 = 7 << NAND_RECOVERY_CYCLES
2604 | 0 << CS_ACTIVE_BSY
2605 | bad_block_byte << BAD_BLOCK_BYTE_NUM
2606 | 0 << BAD_BLOCK_IN_SPARE_AREA
2607 | 2 << WR_RD_BSY_GAP
2608 | wide_bus << WIDE_FLASH
2609 | host->bch_enabled << ENABLE_BCH_ECC;
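/*
 * Sketch (illustrative only, not compiled): how the cfg0 fields above
 * pack for the 2K x8 / 4 bit BCH example (cwperpage = 4,
 * ecc_bytes_hw = 7, spare_bytes = 4); zero-valued fields omitted.
 */
#if 0
static u32 example_cfg0(void)
{
	return (4 - 1) << CW_PER_PAGE		/* 4 codewords per page */
		| 516 << UD_SIZE_BYTES		/* data + protected spare */
		| 5 << NUM_ADDR_CYCLES
		| 7 << ECC_PARITY_SIZE_BYTES_RS	/* ecc_bytes_hw */
		| 1 << SET_RD_MODE_AFTER_STATUS
		| 4 << SPARE_SIZE_BYTES;	/* spare_bytes */
}
#endif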
2611 host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
2612 | host->cw_size << UD_SIZE_BYTES
2613 | 5 << NUM_ADDR_CYCLES
2614 | 0 << SPARE_SIZE_BYTES;
2616 host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
2617 | 0 << CS_ACTIVE_BSY
2618 | 17 << BAD_BLOCK_BYTE_NUM
2619 | 1 << BAD_BLOCK_IN_SPARE_AREA
2620 | 2 << WR_RD_BSY_GAP
2621 | wide_bus << WIDE_FLASH
2622 | 1 << DEV0_CFG1_ECC_DISABLE;
2624 host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
2625 | 0 << ECC_SW_RESET
2626 | host->cw_data << ECC_NUM_DATA_BYTES
2627 | 1 << ECC_FORCE_CLK_OPEN
2628 | ecc_mode << ECC_MODE
2629 | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
2631 host->ecc_buf_cfg = 0x203 << NUM_STEPS;
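/*
 * Note: 0x203 == 515 == host->cw_data - 1, which suggests the
 * NUM_STEPS field holds the per-step ECC data byte count minus one
 * (assumption, the field name notwithstanding).
 */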
2633 host->clrflashstatus = FS_READY_BSY_N;
2634 host->clrreadstatus = 0xc0;
2635 nandc->regs->erased_cw_detect_cfg_clr =
2636 cpu_to_le32(CLR_ERASED_PAGE_DET);
2637 nandc->regs->erased_cw_detect_cfg_set =
2638 cpu_to_le32(SET_ERASED_PAGE_DET);
2640 dev_dbg(nandc->dev,
2641 "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
2642 host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
2643 host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
2644 cwperpage);
2646 return 0;
2647 }
2649 static const struct nand_controller_ops qcom_nandc_ops = {
2650 .attach_chip = qcom_nand_attach_chip,
2651 };
2653 static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
2654 {
2655 if (nandc->props->is_bam) {
2656 if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
2657 dma_unmap_single(nandc->dev, nandc->reg_read_dma,
2658 MAX_REG_RD *
2659 sizeof(*nandc->reg_read_buf),
2660 DMA_FROM_DEVICE);
2662 if (nandc->tx_chan)
2663 dma_release_channel(nandc->tx_chan);
2665 if (nandc->rx_chan)
2666 dma_release_channel(nandc->rx_chan);
2668 if (nandc->cmd_chan)
2669 dma_release_channel(nandc->cmd_chan);
2670 } else {
2671 if (nandc->chan)
2672 dma_release_channel(nandc->chan);
2673 }
2674 }
2676 static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
2677 {
2678 int ret;
2680 ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
2681 if (ret) {
2682 dev_err(nandc->dev, "failed to set DMA mask\n");
2683 return ret;
2684 }
2686 /*
2687 * we use the internal buffer for reading ONFI params, reading small
2688 * data like ID and status, and performing read-copy-write operations
2689 * when writing to a codeword partially. 532 is the maximum possible
2690 * size of a codeword for our nand controller
2691 */
2692 nandc->buf_size = 532;
2694 nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
2695 GFP_KERNEL);
2696 if (!nandc->data_buffer)
2697 return -ENOMEM;
2699 nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
2700 GFP_KERNEL);
2701 if (!nandc->regs)
2702 return -ENOMEM;
2704 nandc->reg_read_buf = devm_kcalloc(nandc->dev,
2705 MAX_REG_RD, sizeof(*nandc->reg_read_buf),
2706 GFP_KERNEL);
2707 if (!nandc->reg_read_buf)
2708 return -ENOMEM;
2710 if (nandc->props->is_bam) {
2711 nandc->reg_read_dma =
2712 dma_map_single(nandc->dev, nandc->reg_read_buf,
2713 MAX_REG_RD *
2714 sizeof(*nandc->reg_read_buf),
2715 DMA_FROM_DEVICE);
2716 if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
2717 dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
2718 return -EIO;
2719 }
2721 nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
2722 if (IS_ERR(nandc->tx_chan)) {
2723 ret = PTR_ERR(nandc->tx_chan);
2724 nandc->tx_chan = NULL;
2725 dev_err_probe(nandc->dev, ret,
2726 "tx DMA channel request failed\n");
2727 goto unalloc;
2728 }
2730 nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
2731 if (IS_ERR(nandc->rx_chan)) {
2732 ret = PTR_ERR(nandc->rx_chan);
2733 nandc->rx_chan = NULL;
2734 dev_err_probe(nandc->dev, ret,
2735 "rx DMA channel request failed\n");
2736 goto unalloc;
2737 }
2739 nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
2740 if (IS_ERR(nandc->cmd_chan)) {
2741 ret = PTR_ERR(nandc->cmd_chan);
2742 nandc->cmd_chan = NULL;
2743 dev_err_probe(nandc->dev, ret,
2744 "cmd DMA channel request failed\n");
2745 goto unalloc;
2746 }
2748 /*
2749 * Initially allocate a BAM transaction to read the ONFI param page.
2750 * After detecting all the devices, this BAM transaction will
2751 * be freed and the next BAM transaction will be allocated with
2752 * the maximum codeword size
2753 */
2754 nandc->max_cwperpage = 1;
2755 nandc->bam_txn = alloc_bam_transaction(nandc);
2756 if (!nandc->bam_txn) {
2757 dev_err(nandc->dev,
2758 "failed to allocate bam transaction\n");
2759 ret = -ENOMEM;
2760 goto unalloc;
2761 }
2762 } else {
2763 nandc->chan = dma_request_chan(nandc->dev, "rxtx");
2764 if (IS_ERR(nandc->chan)) {
2765 ret = PTR_ERR(nandc->chan);
2766 nandc->chan = NULL;
2767 dev_err_probe(nandc->dev, ret,
2768 "rxtx DMA channel request failed\n");
2769 return ret;
2770 }
2771 }
2773 INIT_LIST_HEAD(&nandc->desc_list);
2774 INIT_LIST_HEAD(&nandc->host_list);
2776 nand_controller_init(&nandc->controller);
2777 nandc->controller.ops = &qcom_nandc_ops;
2779 return 0;
2780 unalloc:
2781 qcom_nandc_unalloc(nandc);
2782 return ret;
2783 }
2785 /* one time setup of a few nand controller registers */
2786 static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
2787 {
2788 u32 nand_ctrl;
2790 /* kill onenand */
2791 if (!nandc->props->is_qpic)
2792 nandc_write(nandc, SFLASHC_BURST_CFG, 0);
2794 if (!nandc->props->qpic_v2)
2795 nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
2796 NAND_DEV_CMD_VLD_VAL);
2798 /* enable ADM or BAM DMA */
2799 if (nandc->props->is_bam) {
2800 nand_ctrl = nandc_read(nandc, NAND_CTRL);
2802 /*
2803 * NAND_CTRL is an operational register, and CPU
2804 * access to operational registers is read only
2805 * in BAM mode. So update the NAND_CTRL register
2806 * only if it is not already in BAM mode. In most
2807 * cases BAM mode will be enabled by the bootloader
2808 */
2809 if (!(nand_ctrl & BAM_MODE_EN))
2810 nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
2811 } else {
2812 nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
2813 }
2815 /* save the original values of these registers */
2816 if (!nandc->props->qpic_v2) {
2817 nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
2818 nandc->vld = NAND_DEV_CMD_VLD_VAL;
2819 }
2821 return 0;
2822 }
2824 static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
2825 struct qcom_nand_host *host,
2826 struct device_node *dn)
2827 {
2828 struct nand_chip *chip = &host->chip;
2829 struct mtd_info *mtd = nand_to_mtd(chip);
2830 struct device *dev = nandc->dev;
2831 int ret;
2833 ret = of_property_read_u32(dn, "reg", &host->cs);
2834 if (ret) {
2835 dev_err(dev, "can't get chip-select\n");
2836 return -ENXIO;
2837 }
2839 nand_set_flash_node(chip, dn);
2840 mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
2841 if (!mtd->name)
2842 return -ENOMEM;
2844 mtd->owner = THIS_MODULE;
2845 mtd->dev.parent = dev;
2847 chip->legacy.cmdfunc = qcom_nandc_command;
2848 chip->legacy.select_chip = qcom_nandc_select_chip;
2849 chip->legacy.read_byte = qcom_nandc_read_byte;
2850 chip->legacy.read_buf = qcom_nandc_read_buf;
2851 chip->legacy.write_buf = qcom_nandc_write_buf;
2852 chip->legacy.set_features = nand_get_set_features_notsupp;
2853 chip->legacy.get_features = nand_get_set_features_notsupp;
2855 /*
2856 * the bad block marker is readable only when we read the last codeword
2857 * of a page with ECC disabled. currently, the nand_base and nand_bbt
2858 * helpers don't allow us to read BB from a nand chip with ECC
2859 * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
2860 * and block_markbad helpers until we permanently switch to using
2861 * MTD_OPS_RAW for all drivers (with the help of badblockbits)
2862 */
2863 chip->legacy.block_bad = qcom_nandc_block_bad;
2864 chip->legacy.block_markbad = qcom_nandc_block_markbad;
2866 chip->controller = &nandc->controller;
2867 chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
2868 NAND_SKIP_BBTSCAN;
2870 /* set up initial status value */
2871 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2873 ret = nand_scan(chip, 1);
2874 if (ret)
2875 return ret;
2877 if (nandc->props->is_bam) {
2878 free_bam_transaction(nandc);
2879 nandc->bam_txn = alloc_bam_transaction(nandc);
2880 if (!nandc->bam_txn) {
2881 dev_err(nandc->dev,
2882 "failed to allocate bam transaction\n");
2883 return -ENOMEM;
2884 }
2885 }
2887 ret = mtd_device_register(mtd, NULL, 0);
2888 if (ret)
2889 nand_cleanup(chip);
2891 return ret;
2892 }
2894 static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
2895 {
2896 struct device *dev = nandc->dev;
2897 struct device_node *dn = dev->of_node, *child;
2898 struct qcom_nand_host *host;
2899 int ret;
2901 for_each_available_child_of_node(dn, child) {
2902 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2903 if (!host) {
2904 of_node_put(child);
2905 return -ENOMEM;
2906 }
2908 ret = qcom_nand_host_init_and_register(nandc, host, child);
2909 if (ret) {
2910 devm_kfree(dev, host);
2911 continue;
2912 }
2914 list_add_tail(&host->node, &nandc->host_list);
2915 }
2917 if (list_empty(&nandc->host_list))
2918 return -ENODEV;
2920 return 0;
2921 }
2923 /* parse custom DT properties here */
2924 static int qcom_nandc_parse_dt(struct platform_device *pdev)
2925 {
2926 struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2927 struct device_node *np = nandc->dev->of_node;
2928 int ret;
2930 if (!nandc->props->is_bam) {
2931 ret = of_property_read_u32(np, "qcom,cmd-crci",
2932 &nandc->cmd_crci);
2933 if (ret) {
2934 dev_err(nandc->dev, "command CRCI unspecified\n");
2935 return ret;
2936 }
2938 ret = of_property_read_u32(np, "qcom,data-crci",
2939 &nandc->data_crci);
2940 if (ret) {
2941 dev_err(nandc->dev, "data CRCI unspecified\n");
2942 return ret;
2943 }
2944 }
2946 return 0;
2947 }
2949 static int qcom_nandc_probe(struct platform_device *pdev)
2950 {
2951 struct qcom_nand_controller *nandc;
2952 const void *dev_data;
2953 struct device *dev = &pdev->dev;
2954 struct resource *res;
2955 int ret;
2957 nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
2958 if (!nandc)
2959 return -ENOMEM;
2961 platform_set_drvdata(pdev, nandc);
2962 nandc->dev = dev;
2964 dev_data = of_device_get_match_data(dev);
2965 if (!dev_data) {
2966 dev_err(&pdev->dev, "failed to get device data\n");
2967 return -ENODEV;
2968 }
2970 nandc->props = dev_data;
2972 nandc->core_clk = devm_clk_get(dev, "core");
2973 if (IS_ERR(nandc->core_clk))
2974 return PTR_ERR(nandc->core_clk);
2976 nandc->aon_clk = devm_clk_get(dev, "aon");
2977 if (IS_ERR(nandc->aon_clk))
2978 return PTR_ERR(nandc->aon_clk);
2980 ret = qcom_nandc_parse_dt(pdev);
2981 if (ret)
2982 return ret;
2984 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2985 nandc->base = devm_ioremap_resource(dev, res);
2986 if (IS_ERR(nandc->base))
2987 return PTR_ERR(nandc->base);
2989 nandc->base_phys = res->start;
2990 nandc->base_dma = dma_map_resource(dev, res->start,
2991 resource_size(res),
2992 DMA_BIDIRECTIONAL, 0);
2993 if (dma_mapping_error(dev, nandc->base_dma))
2994 return -ENXIO;
2996 ret = qcom_nandc_alloc(nandc);
2997 if (ret)
2998 goto err_nandc_alloc;
3000 ret = clk_prepare_enable(nandc->core_clk);
3001 if (ret)
3002 goto err_core_clk;
3004 ret = clk_prepare_enable(nandc->aon_clk);
3005 if (ret)
3006 goto err_aon_clk;
3008 ret = qcom_nandc_setup(nandc);
3009 if (ret)
3010 goto err_setup;
3012 ret = qcom_probe_nand_devices(nandc);
3013 if (ret)
3014 goto err_setup;
3016 return 0;
3018 err_setup:
3019 clk_disable_unprepare(nandc->aon_clk);
3020 err_aon_clk:
3021 clk_disable_unprepare(nandc->core_clk);
3022 err_core_clk:
3023 qcom_nandc_unalloc(nandc);
3024 err_nandc_alloc:
3025 dma_unmap_resource(dev, res->start, resource_size(res),
3026 DMA_BIDIRECTIONAL, 0);
3028 return ret;
3029 }
3031 static int qcom_nandc_remove(struct platform_device *pdev)
3032 {
3033 struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
3034 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3035 struct qcom_nand_host *host;
3036 struct nand_chip *chip;
3037 int ret;
3039 list_for_each_entry(host, &nandc->host_list, node) {
3040 chip = &host->chip;
3041 ret = mtd_device_unregister(nand_to_mtd(chip));
3042 WARN_ON(ret);
3043 nand_cleanup(chip);
3044 }
3046 qcom_nandc_unalloc(nandc);
3048 clk_disable_unprepare(nandc->aon_clk);
3049 clk_disable_unprepare(nandc->core_clk);
3051 dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
3052 DMA_BIDIRECTIONAL, 0);
3054 return 0;
3055 }
3057 static const struct qcom_nandc_props ipq806x_nandc_props = {
3058 .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
3059 .is_bam = false,
3060 .dev_cmd_reg_start = 0x0,
3061 };
3063 static const struct qcom_nandc_props ipq4019_nandc_props = {
3064 .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
3065 .is_bam = true,
3066 .is_qpic = true,
3067 .dev_cmd_reg_start = 0x0,
3068 };
3070 static const struct qcom_nandc_props ipq8074_nandc_props = {
3071 .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
3072 .is_bam = true,
3073 .is_qpic = true,
3074 .dev_cmd_reg_start = 0x7000,
3075 };
3077 static const struct qcom_nandc_props sdx55_nandc_props = {
3078 .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
3079 .is_bam = true,
3080 .is_qpic = true,
3081 .qpic_v2 = true,
3082 .dev_cmd_reg_start = 0x7000,
3083 };
3085 /*
3086 * data will hold a struct pointer containing more differences once we support
3087 * more controller variants
3088 */
3089 static const struct of_device_id qcom_nandc_of_match[] = {
3090 {
3091 .compatible = "qcom,ipq806x-nand",
3092 .data = &ipq806x_nandc_props,
3093 },
3094 {
3095 .compatible = "qcom,ipq4019-nand",
3096 .data = &ipq4019_nandc_props,
3097 },
3098 {
3099 .compatible = "qcom,ipq6018-nand",
3100 .data = &ipq8074_nandc_props,
3101 },
3102 {
3103 .compatible = "qcom,ipq8074-nand",
3104 .data = &ipq8074_nandc_props,
3105 },
3106 {
3107 .compatible = "qcom,sdx55-nand",
3108 .data = &sdx55_nandc_props,
3109 },
3110 {}
3111 };
3112 MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
3114 static struct platform_driver qcom_nandc_driver = {
3115 .driver = {
3116 .name = "qcom-nandc",
3117 .of_match_table = qcom_nandc_of_match,
3118 },
3119 .probe = qcom_nandc_probe,
3120 .remove = qcom_nandc_remove,
3121 };
3122 module_platform_driver(qcom_nandc_driver);
3124 MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
3125 MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
3126 MODULE_LICENSE("GPL v2");