1 // SPDX-License-Identifier: GPL-2.0+
3 * Cadence NAND flash controller driver
5 * Copyright (C) 2019 Cadence
7 * Author: Piotr Sroka <piotrs@cadence.com>
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmaengine.h>
14 #include <linux/interrupt.h>
15 #include <linux/module.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/mtd/rawnand.h>
18 #include <linux/of_device.h>
19 #include <linux/iopoll.h>
20 #include <linux/slab.h>
23 * HPNFC can work in 3 modes:
24 * - PIO - can work in master or slave DMA
25 * - CDMA - needs Master DMA for accessing command descriptors.
26 * - Generic mode - can use only slave DMA.
27 * CDMA and PIO modes can be used to execute only base commands.
28 * Generic mode can be used to execute any command
29 * on NAND flash memory. Driver uses CDMA mode for
30 * block erasing, page reading, page programming.
31 * Generic mode is used for executing the rest of the commands.
34 #define MAX_ADDRESS_CYC 6
35 #define MAX_ERASE_ADDRESS_CYC 3
36 #define MAX_DATA_SIZE 0xFFFC
37 #define DMA_DATA_SIZE_ALIGN 8
39 /* Register definition. */
42 * Writing data to this register will initiate a new transaction
43 * of the NF controller.
45 #define CMD_REG0 0x0000
46 /* Command type field mask. */
47 #define CMD_REG0_CT GENMASK(31, 30)
48 /* Command type CDMA. */
49 #define CMD_REG0_CT_CDMA 0uL
50 /* Command type generic. */
51 #define CMD_REG0_CT_GEN 3uL
52 /* Command thread number field mask. */
53 #define CMD_REG0_TN GENMASK(27, 24)
55 /* Command register 2. */
56 #define CMD_REG2 0x0008
57 /* Command register 3. */
58 #define CMD_REG3 0x000C
59 /* Pointer register to select which thread status will be selected. */
60 #define CMD_STATUS_PTR 0x0010
61 /* Command status register for selected thread. */
62 #define CMD_STATUS 0x0014
64 /* Interrupt status register. */
65 #define INTR_STATUS 0x0110
66 #define INTR_STATUS_SDMA_ERR BIT(22)
67 #define INTR_STATUS_SDMA_TRIGG BIT(21)
68 #define INTR_STATUS_UNSUPP_CMD BIT(19)
69 #define INTR_STATUS_DDMA_TERR BIT(18)
70 #define INTR_STATUS_CDMA_TERR BIT(17)
71 #define INTR_STATUS_CDMA_IDL BIT(16)
73 /* Interrupt enable register. */
74 #define INTR_ENABLE 0x0114
75 #define INTR_ENABLE_INTR_EN BIT(31)
76 #define INTR_ENABLE_SDMA_ERR_EN BIT(22)
77 #define INTR_ENABLE_SDMA_TRIGG_EN BIT(21)
78 #define INTR_ENABLE_UNSUPP_CMD_EN BIT(19)
79 #define INTR_ENABLE_DDMA_TERR_EN BIT(18)
80 #define INTR_ENABLE_CDMA_TERR_EN BIT(17)
81 #define INTR_ENABLE_CDMA_IDLE_EN BIT(16)
83 /* Controller internal state. */
84 #define CTRL_STATUS 0x0118
85 #define CTRL_STATUS_INIT_COMP BIT(9)
86 #define CTRL_STATUS_CTRL_BUSY BIT(8)
88 /* Command Engine threads state. */
89 #define TRD_STATUS 0x0120
91 /* Command Engine interrupt thread error status. */
92 #define TRD_ERR_INT_STATUS 0x0128
93 /* Command Engine interrupt thread error enable. */
94 #define TRD_ERR_INT_STATUS_EN 0x0130
95 /* Command Engine interrupt thread complete status. */
96 #define TRD_COMP_INT_STATUS 0x0138
99 * Transfer config 0 register.
100 * Configures data transfer parameters.
102 #define TRAN_CFG_0 0x0400
103 /* Offset value from the beginning of the page. */
104 #define TRAN_CFG_0_OFFSET GENMASK(31, 16)
105 /* Number of sectors to transfer within a single NF device's page. */
106 #define TRAN_CFG_0_SEC_CNT GENMASK(7, 0)
109 * Transfer config 1 register.
110 * Configures data transfer parameters.
112 #define TRAN_CFG_1 0x0404
113 /* Size of last data sector. */
114 #define TRAN_CFG_1_LAST_SEC_SIZE GENMASK(31, 16)
115 /* Size of not-last data sector. */
116 #define TRAN_CFG_1_SECTOR_SIZE GENMASK(15, 0)
118 /* ECC engine configuration register 0. */
119 #define ECC_CONFIG_0 0x0428
120 /* Correction strength. */
121 #define ECC_CONFIG_0_CORR_STR GENMASK(10, 8)
122 /* Enable erased pages detection mechanism. */
123 #define ECC_CONFIG_0_ERASE_DET_EN BIT(1)
124 /* Enable controller ECC check bits generation and correction. */
125 #define ECC_CONFIG_0_ECC_EN BIT(0)
127 /* ECC engine configuration register 1. */
128 #define ECC_CONFIG_1 0x042C
130 /* Multiplane settings register. */
131 #define MULTIPLANE_CFG 0x0434
132 /* Cache operation settings. */
133 #define CACHE_CFG 0x0438
135 /* DMA settings register. */
136 #define DMA_SETINGS 0x043C
137 /* Enable SDMA error reporting on access to an unprepared slave DMA interface. */
138 #define DMA_SETINGS_SDMA_ERR_RSP BIT(17)
140 /* Transferred data block size for the slave DMA module. */
141 #define SDMA_SIZE 0x0440
143 /* Thread number associated with transferred data block
144 * for the slave DMA module.
146 #define SDMA_TRD_NUM 0x0444
147 /* Thread number mask. */
148 #define SDMA_TRD_NUM_SDMA_TRD GENMASK(2, 0)
150 #define CONTROL_DATA_CTRL 0x0494
151 /* Control data size field mask. */
152 #define CONTROL_DATA_CTRL_SIZE GENMASK(15, 0)
154 #define CTRL_VERSION 0x800
155 #define CTRL_VERSION_REV GENMASK(7, 0)
157 /* Available hardware features of the controller. */
158 #define CTRL_FEATURES 0x804
159 /* Support for NV-DDR2/3 work mode. */
160 #define CTRL_FEATURES_NVDDR_2_3 BIT(28)
161 /* Support for NV-DDR work mode. */
162 #define CTRL_FEATURES_NVDDR BIT(27)
163 /* Support for asynchronous work mode. */
164 #define CTRL_FEATURES_ASYNC BIT(26)
165 /* Number of banks supported by the controller. */
166 #define CTRL_FEATURES_N_BANKS GENMASK(25, 24)
167 /* Slave and Master DMA data width. */
168 #define CTRL_FEATURES_DMA_DWITH64 BIT(21)
169 /* Availability of Control Data feature.*/
170 #define CTRL_FEATURES_CONTROL_DATA BIT(10)
172 /* BCH Engine identification register 0 - correction strengths. */
173 #define BCH_CFG_0 0x838
174 #define BCH_CFG_0_CORR_CAP_0 GENMASK(7, 0)
175 #define BCH_CFG_0_CORR_CAP_1 GENMASK(15, 8)
176 #define BCH_CFG_0_CORR_CAP_2 GENMASK(23, 16)
177 #define BCH_CFG_0_CORR_CAP_3 GENMASK(31, 24)
179 /* BCH Engine identification register 1 - correction strengths. */
180 #define BCH_CFG_1 0x83C
181 #define BCH_CFG_1_CORR_CAP_4 GENMASK(7, 0)
182 #define BCH_CFG_1_CORR_CAP_5 GENMASK(15, 8)
183 #define BCH_CFG_1_CORR_CAP_6 GENMASK(23, 16)
184 #define BCH_CFG_1_CORR_CAP_7 GENMASK(31, 24)
186 /* BCH Engine identification register 2 - sector sizes. */
187 #define BCH_CFG_2 0x840
188 #define BCH_CFG_2_SECT_0 GENMASK(15, 0)
189 #define BCH_CFG_2_SECT_1 GENMASK(31, 16)
191 /* BCH Engine identification register 3. */
192 #define BCH_CFG_3 0x844
193 #define BCH_CFG_3_METADATA_SIZE GENMASK(23, 16)
195 /* Ready/Busy# line status. */
196 #define RBN_SETINGS 0x1004
198 /* Common settings. */
199 #define COMMON_SET 0x1008
200 /* 16 bit device connected to the NAND Flash interface. */
201 #define COMMON_SET_DEVICE_16BIT BIT(8)
203 /* Skip_bytes registers. */
204 #define SKIP_BYTES_CONF 0x100C
205 #define SKIP_BYTES_MARKER_VALUE GENMASK(31, 16)
206 #define SKIP_BYTES_NUM_OF_BYTES GENMASK(7, 0)
208 #define SKIP_BYTES_OFFSET 0x1010
209 #define SKIP_BYTES_OFFSET_VALUE GENMASK(23, 0)
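/*
 * The skip-bytes block lets the driver describe a small region of the page
 * (here the bad block marker area) that the controller handles specially:
 * the number of bytes, their offset within the page and the marker value
 * substituted on programming are configured through
 * cadence_nand_set_skip_bytes_conf() and cadence_nand_set_skip_marker_val().
 */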
211 /* Timings configuration. */
212 #define ASYNC_TOGGLE_TIMINGS 0x101c
213 #define ASYNC_TOGGLE_TIMINGS_TRH GENMASK(28, 24)
214 #define ASYNC_TOGGLE_TIMINGS_TRP GENMASK(20, 16)
215 #define ASYNC_TOGGLE_TIMINGS_TWH GENMASK(12, 8)
216 #define ASYNC_TOGGLE_TIMINGS_TWP GENMASK(4, 0)
218 #define TIMINGS0 0x1024
219 #define TIMINGS0_TADL GENMASK(31, 24)
220 #define TIMINGS0_TCCS GENMASK(23, 16)
221 #define TIMINGS0_TWHR GENMASK(15, 8)
222 #define TIMINGS0_TRHW GENMASK(7, 0)
224 #define TIMINGS1 0x1028
225 #define TIMINGS1_TRHZ GENMASK(31, 24)
226 #define TIMINGS1_TWB GENMASK(23, 16)
227 #define TIMINGS1_TVDLY GENMASK(7, 0)
229 #define TIMINGS2 0x102c
230 #define TIMINGS2_TFEAT GENMASK(25, 16)
231 #define TIMINGS2_CS_HOLD_TIME GENMASK(13, 8)
232 #define TIMINGS2_CS_SETUP_TIME GENMASK(5, 0)
234 /* Configuration of the resynchronization of slave DLL of PHY. */
235 #define DLL_PHY_CTRL 0x1034
236 #define DLL_PHY_CTRL_DLL_RST_N BIT(24)
237 #define DLL_PHY_CTRL_EXTENDED_WR_MODE BIT(17)
238 #define DLL_PHY_CTRL_EXTENDED_RD_MODE BIT(16)
239 #define DLL_PHY_CTRL_RS_HIGH_WAIT_CNT GENMASK(11, 8)
240 #define DLL_PHY_CTRL_RS_IDLE_CNT GENMASK(7, 0)
242 /* Register controlling DQ related timing. */
243 #define PHY_DQ_TIMING 0x2000
244 /* Register controlling DQS related timing. */
245 #define PHY_DQS_TIMING 0x2004
246 #define PHY_DQS_TIMING_DQS_SEL_OE_END GENMASK(3, 0)
247 #define PHY_DQS_TIMING_PHONY_DQS_SEL BIT(16)
248 #define PHY_DQS_TIMING_USE_PHONY_DQS BIT(20)
250 /* Register controlling the gate and loopback control related timing. */
251 #define PHY_GATE_LPBK_CTRL 0x2008
252 #define PHY_GATE_LPBK_CTRL_RDS GENMASK(24, 19)
254 /* Register holds the control for the master DLL logic. */
255 #define PHY_DLL_MASTER_CTRL 0x200C
256 #define PHY_DLL_MASTER_CTRL_BYPASS_MODE BIT(23)
258 /* Register holds the control for the slave DLL logic. */
259 #define PHY_DLL_SLAVE_CTRL 0x2010
261 /* This register handles the global control settings for the PHY. */
262 #define PHY_CTRL 0x2080
263 #define PHY_CTRL_SDR_DQS BIT(14)
264 #define PHY_CTRL_PHONY_DQS GENMASK(9, 4)
267 * This register handles the global control settings
268 * for the termination selects for reads.
270 #define PHY_TSEL 0x2084
272 /* Generic command layout. */
273 #define GCMD_LAY_CS GENMASK_ULL(11, 8)
275 * This bit informs the minicontroller if it has to wait for tWB
276 * after sending the last CMD/ADDR/DATA in the sequence.
278 #define GCMD_LAY_TWB BIT_ULL(6)
279 /* Type of generic instruction. */
280 #define GCMD_LAY_INSTR GENMASK_ULL(5, 0)
282 /* Generic CMD sequence type. */
283 #define GCMD_LAY_INSTR_CMD 0
284 /* Generic ADDR sequence type. */
285 #define GCMD_LAY_INSTR_ADDR 1
286 /* Generic data transfer sequence type. */
287 #define GCMD_LAY_INSTR_DATA 2
289 /* Command opcode field of a generic command (used when the instruction type is CMD). */
290 #define GCMD_LAY_INPUT_CMD GENMASK_ULL(23, 16)
292 /* Generic command address sequence - address fields. */
293 #define GCMD_LAY_INPUT_ADDR GENMASK_ULL(63, 16)
294 /* Generic command address sequence - address size. */
295 #define GCMD_LAY_INPUT_ADDR_SIZE GENMASK_ULL(13, 11)
297 /* Transfer direction field of generic command data sequence. */
298 #define GCMD_DIR BIT_ULL(11)
299 /* Read transfer direction of generic command data sequence. */
300 #define GCMD_DIR_READ 0
301 /* Write transfer direction of generic command data sequence. */
302 #define GCMD_DIR_WRITE 1
304 /* ECC enabled flag of generic command data sequence - ECC enabled. */
305 #define GCMD_ECC_EN BIT_ULL(12)
306 /* Generic command data sequence - sector size. */
307 #define GCMD_SECT_SIZE GENMASK_ULL(31, 16)
308 /* Generic command data sequence - sector count. */
309 #define GCMD_SECT_CNT GENMASK_ULL(39, 32)
310 /* Generic command data sequence - last sector size. */
311 #define GCMD_LAST_SIZE GENMASK_ULL(55, 40)
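/*
 * A generic command is a 64-bit word assembled with FIELD_PREP() from the
 * layout fields above (chip select, instruction type and the opcode,
 * address or data parameters) and split into CMD_REG2/CMD_REG3 by
 * cadence_nand_generic_cmd_send().
 */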
313 /* CDMA descriptor fields. */
314 /* Erase command type of CDMA descriptor. */
315 #define CDMA_CT_ERASE 0x1000
316 /* Program page command type of CDMA descriptor. */
317 #define CDMA_CT_WR 0x2100
318 /* Read page command type of CDMA descriptor. */
319 #define CDMA_CT_RD 0x2200
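/*
 * In CDMA mode the driver builds a single cadence_nand_cdma_desc in coherent
 * memory, points CMD_REG2 at its DMA address and kicks one command thread
 * through CMD_REG0; completion and error information come back in the
 * descriptor status word and in the thread interrupt registers.
 */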
321 /* Flash pointer memory shift. */
322 #define CDMA_CFPTR_MEM_SHIFT 24
323 /* Flash pointer memory mask. */
324 #define CDMA_CFPTR_MEM GENMASK(26, 24)
327 * Command DMA descriptor flag. If set, an interrupt is issued after
328 * the completion of descriptor processing.
330 #define CDMA_CF_INT BIT(8)
332 * Command DMA descriptor flags - the next descriptor
333 * address field is valid and descriptor processing should continue.
335 #define CDMA_CF_CONT BIT(9)
336 /* DMA master flag of command DMA descriptor. */
337 #define CDMA_CF_DMA_MASTER BIT(10)
339 /* Operation complete status of command descriptor. */
340 #define CDMA_CS_COMP BIT(15)
342 /* Command descriptor status - operation fail. */
343 #define CDMA_CS_FAIL BIT(14)
344 /* Command descriptor status - page erased. */
345 #define CDMA_CS_ERP BIT(11)
346 /* Command descriptor status - timeout occurred. */
347 #define CDMA_CS_TOUT BIT(10)
349 * Maximum amount of correction applied to one ECC sector.
350 * It is part of command descriptor status.
352 #define CDMA_CS_MAXERR GENMASK(9, 2)
353 /* Command descriptor status - uncorrectable ECC error. */
354 #define CDMA_CS_UNCE BIT(1)
355 /* Command descriptor status - descriptor error. */
356 #define CDMA_CS_ERR BIT(0)
358 /* Status of operation - OK. */
360 /* Status of operation - FAIL. */
362 /* Status of operation - uncorrectable ECC error. */
363 #define STAT_ECC_UNCORR 3
364 /* Status of operation - page erased. */
365 #define STAT_ERASED 5
366 /* Status of operation - correctable ECC error. */
367 #define STAT_ECC_CORR 6
368 /* Status of operation - unexpected state. */
369 #define STAT_UNKNOWN 7
370 /* Status of operation - operation is not completed yet. */
371 #define STAT_BUSY 0xFF
373 #define BCH_MAX_NUM_CORR_CAPS 8
374 #define BCH_MAX_NUM_SECTOR_SIZES 2
376 struct cadence_nand_timings
{
377 u32 async_toggle_timings
;
384 u32 phy_gate_lpbk_ctrl
;
387 /* Command DMA descriptor. */
388 struct cadence_nand_cdma_desc
{
389 /* Next descriptor address. */
392 /* Flash address is a 32-bit address comprising BANK and ROW ADDR. */
394 /* Field appears in HPNFC version 13. */
398 /* Operation the controller needs to perform. */
401 /* Flags for operation of this command. */
405 /* System/host memory address required for data DMA commands. */
408 /* Status of operation. */
412 /* Address pointer to sync buffer location. */
413 u64 sync_flag_pointer
;
415 /* Controls the buffer sync mechanism. */
419 /* Control data pointer. */
423 /* Interrupt status. */
424 struct cadence_nand_irq_status
{
425 /* Thread operation complete status. */
427 /* Thread operation error. */
429 /* Controller status. */
433 /* Cadence NAND flash controller capabilities obtained from driver data. */
434 struct cadence_nand_dt_devdata
{
435 /* Skew value of the output signals of the NAND Flash interface. */
437 /* It informs if slave DMA interface is connected to DMA engine. */
438 unsigned int has_dma
:1;
441 /* Cadence NAND flash controller capabilities read from registers. */
442 struct cdns_nand_caps
{
443 /* Maximum number of banks supported by hardware. */
445 /* Slave and Master DMA data width in bytes (4 or 8). */
447 /* Control Data feature supported. */
448 bool data_control_supp
;
449 /* Is PHY type DLL. */
450 bool is_phy_type_dll
;
453 struct cdns_nand_ctrl
{
455 struct nand_controller controller
;
456 struct cadence_nand_cdma_desc
*cdma_desc
;
458 const struct cadence_nand_dt_devdata
*caps1
;
459 struct cdns_nand_caps caps2
;
461 dma_addr_t dma_cdma_desc
;
464 u8 curr_corr_str_idx
;
466 /* Register interface. */
475 /* Interrupts that have happened. */
476 struct cadence_nand_irq_status irq_status
;
477 /* Interrupts we are waiting for. */
478 struct cadence_nand_irq_status irq_mask
;
479 struct completion complete
;
480 /* Protect irq_mask and irq_status. */
483 int ecc_strengths
[BCH_MAX_NUM_CORR_CAPS
];
484 struct nand_ecc_step_info ecc_stepinfos
[BCH_MAX_NUM_SECTOR_SIZES
];
485 struct nand_ecc_caps ecc_caps
;
489 struct dma_chan
*dmac
;
493 * Estimated Board delay. The value includes the total
494 * round trip delay for the signals and is used for deciding on values
495 * associated with data read capture.
499 struct nand_chip
*selected_chip
;
501 unsigned long assigned_cs
;
502 struct list_head chips
;
503 u8 bch_metadata_size
;
506 struct cdns_nand_chip
{
507 struct cadence_nand_timings timings
;
508 struct nand_chip chip
;
510 struct list_head node
;
513 * part of the OOB area of a NAND flash memory page.
514 * This part is available for the user to read or write.
518 /* Sector size. There are a few sectors per mtd->writesize. */
524 /* Number of bytes reserved for BBM. */
526 /* ECC strength index. */
533 int (*calc_ecc_bytes
)(int step_size
, int strength
);
538 cdns_nand_chip
*to_cdns_nand_chip(struct nand_chip
*chip
)
540 return container_of(chip
, struct cdns_nand_chip
, chip
);
544 cdns_nand_ctrl
*to_cdns_nand_ctrl(struct nand_controller
*controller
)
546 return container_of(controller
, struct cdns_nand_ctrl
, controller
);
550 cadence_nand_dma_buf_ok(struct cdns_nand_ctrl
*cdns_ctrl
, const void *buf
,
553 u8 data_dma_width
= cdns_ctrl
->caps2
.data_dma_width
;
555 return buf
&& virt_addr_valid(buf
) &&
556 likely(IS_ALIGNED((uintptr_t)buf
, data_dma_width
)) &&
557 likely(IS_ALIGNED(buf_len
, DMA_DATA_SIZE_ALIGN
));
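/*
 * A buffer qualifies for direct (zero-copy) slave DMA only when it has a
 * valid virtual address, is aligned to the controller's DMA data width
 * (4 or 8 bytes) and its length is a multiple of DMA_DATA_SIZE_ALIGN;
 * otherwise the driver falls back to its bounce buffer.
 */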
560 static int cadence_nand_wait_for_value(struct cdns_nand_ctrl
*cdns_ctrl
,
561 u32 reg_offset
, u32 timeout_us
,
562 u32 mask
, bool is_clear
)
567 ret
= readl_relaxed_poll_timeout(cdns_ctrl
->reg
+ reg_offset
,
568 val
, !(val
& mask
) == is_clear
,
572 dev_err(cdns_ctrl
->dev
,
573 "Timeout while waiting for reg %x with mask %x is clear %d\n",
574 reg_offset
, mask
, is_clear
);
580 static int cadence_nand_set_ecc_enable(struct cdns_nand_ctrl
*cdns_ctrl
,
585 if (cadence_nand_wait_for_value(cdns_ctrl
, CTRL_STATUS
,
587 CTRL_STATUS_CTRL_BUSY
, true))
590 reg
= readl_relaxed(cdns_ctrl
->reg
+ ECC_CONFIG_0
);
593 reg
|= ECC_CONFIG_0_ECC_EN
;
595 reg
&= ~ECC_CONFIG_0_ECC_EN
;
597 writel_relaxed(reg
, cdns_ctrl
->reg
+ ECC_CONFIG_0
);
602 static void cadence_nand_set_ecc_strength(struct cdns_nand_ctrl
*cdns_ctrl
,
607 if (cdns_ctrl
->curr_corr_str_idx
== corr_str_idx
)
610 reg
= readl_relaxed(cdns_ctrl
->reg
+ ECC_CONFIG_0
);
611 reg
&= ~ECC_CONFIG_0_CORR_STR
;
612 reg
|= FIELD_PREP(ECC_CONFIG_0_CORR_STR
, corr_str_idx
);
613 writel_relaxed(reg
, cdns_ctrl
->reg
+ ECC_CONFIG_0
);
615 cdns_ctrl
->curr_corr_str_idx
= corr_str_idx
;
618 static int cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl
*cdns_ctrl
,
621 int i
, corr_str_idx
= -1;
623 for (i
= 0; i
< BCH_MAX_NUM_CORR_CAPS
; i
++) {
624 if (cdns_ctrl
->ecc_strengths
[i
] == strength
) {
633 static int cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl
*cdns_ctrl
,
638 if (cadence_nand_wait_for_value(cdns_ctrl
, CTRL_STATUS
,
640 CTRL_STATUS_CTRL_BUSY
, true))
643 reg
= readl_relaxed(cdns_ctrl
->reg
+ SKIP_BYTES_CONF
);
644 reg
&= ~SKIP_BYTES_MARKER_VALUE
;
645 reg
|= FIELD_PREP(SKIP_BYTES_MARKER_VALUE
,
648 writel_relaxed(reg
, cdns_ctrl
->reg
+ SKIP_BYTES_CONF
);
653 static int cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl
*cdns_ctrl
,
658 u32 reg
, skip_bytes_offset
;
660 if (cadence_nand_wait_for_value(cdns_ctrl
, CTRL_STATUS
,
662 CTRL_STATUS_CTRL_BUSY
, true))
670 reg
= readl_relaxed(cdns_ctrl
->reg
+ SKIP_BYTES_CONF
);
671 reg
&= ~SKIP_BYTES_NUM_OF_BYTES
;
672 reg
|= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES
,
674 skip_bytes_offset
= FIELD_PREP(SKIP_BYTES_OFFSET_VALUE
,
677 writel_relaxed(reg
, cdns_ctrl
->reg
+ SKIP_BYTES_CONF
);
678 writel_relaxed(skip_bytes_offset
, cdns_ctrl
->reg
+ SKIP_BYTES_OFFSET
);
683 /* This function enables/disables hardware detection of erased data. */
684 static void cadence_nand_set_erase_detection(struct cdns_nand_ctrl
*cdns_ctrl
,
686 u8 bitflips_threshold
)
690 reg
= readl_relaxed(cdns_ctrl
->reg
+ ECC_CONFIG_0
);
693 reg
|= ECC_CONFIG_0_ERASE_DET_EN
;
695 reg
&= ~ECC_CONFIG_0_ERASE_DET_EN
;
697 writel_relaxed(reg
, cdns_ctrl
->reg
+ ECC_CONFIG_0
);
698 writel_relaxed(bitflips_threshold
, cdns_ctrl
->reg
+ ECC_CONFIG_1
);
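/*
 * The threshold written to ECC_CONFIG_1 is the chip's ECC strength (see
 * cadence_nand_select_target()), presumably the maximum number of bitflips
 * a page may contain while still being reported as erased.
 */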
701 static int cadence_nand_set_access_width16(struct cdns_nand_ctrl
*cdns_ctrl
,
706 if (cadence_nand_wait_for_value(cdns_ctrl
, CTRL_STATUS
,
708 CTRL_STATUS_CTRL_BUSY
, true))
711 reg
= readl_relaxed(cdns_ctrl
->reg
+ COMMON_SET
);
714 reg
&= ~COMMON_SET_DEVICE_16BIT
;
716 reg
|= COMMON_SET_DEVICE_16BIT
;
717 writel_relaxed(reg
, cdns_ctrl
->reg
+ COMMON_SET
);
723 cadence_nand_clear_interrupt(struct cdns_nand_ctrl
*cdns_ctrl
,
724 struct cadence_nand_irq_status
*irq_status
)
726 writel_relaxed(irq_status
->status
, cdns_ctrl
->reg
+ INTR_STATUS
);
727 writel_relaxed(irq_status
->trd_status
,
728 cdns_ctrl
->reg
+ TRD_COMP_INT_STATUS
);
729 writel_relaxed(irq_status
->trd_error
,
730 cdns_ctrl
->reg
+ TRD_ERR_INT_STATUS
);
734 cadence_nand_read_int_status(struct cdns_nand_ctrl
*cdns_ctrl
,
735 struct cadence_nand_irq_status
*irq_status
)
737 irq_status
->status
= readl_relaxed(cdns_ctrl
->reg
+ INTR_STATUS
);
738 irq_status
->trd_status
= readl_relaxed(cdns_ctrl
->reg
739 + TRD_COMP_INT_STATUS
);
740 irq_status
->trd_error
= readl_relaxed(cdns_ctrl
->reg
741 + TRD_ERR_INT_STATUS
);
744 static u32
irq_detected(struct cdns_nand_ctrl
*cdns_ctrl
,
745 struct cadence_nand_irq_status
*irq_status
)
747 cadence_nand_read_int_status(cdns_ctrl
, irq_status
);
749 return irq_status
->status
|| irq_status
->trd_status
||
750 irq_status
->trd_error
;
753 static void cadence_nand_reset_irq(struct cdns_nand_ctrl
*cdns_ctrl
)
757 spin_lock_irqsave(&cdns_ctrl
->irq_lock
, flags
);
758 memset(&cdns_ctrl
->irq_status
, 0, sizeof(cdns_ctrl
->irq_status
));
759 memset(&cdns_ctrl
->irq_mask
, 0, sizeof(cdns_ctrl
->irq_mask
));
760 spin_unlock_irqrestore(&cdns_ctrl
->irq_lock
, flags
);
764 * This is the interrupt service routine. It handles all interrupts
765 * sent to this device.
767 static irqreturn_t
cadence_nand_isr(int irq
, void *dev_id
)
769 struct cdns_nand_ctrl
*cdns_ctrl
= dev_id
;
770 struct cadence_nand_irq_status irq_status
;
771 irqreturn_t result
= IRQ_NONE
;
773 spin_lock(&cdns_ctrl
->irq_lock
);
775 if (irq_detected(cdns_ctrl
, &irq_status
)) {
776 /* Handle interrupt. */
777 /* First acknowledge it. */
778 cadence_nand_clear_interrupt(cdns_ctrl
, &irq_status
);
779 /* Store the status in the device context for someone to read. */
780 cdns_ctrl
->irq_status
.status
|= irq_status
.status
;
781 cdns_ctrl
->irq_status
.trd_status
|= irq_status
.trd_status
;
782 cdns_ctrl
->irq_status
.trd_error
|= irq_status
.trd_error
;
783 /* Notify anyone who cares that it happened. */
784 complete(&cdns_ctrl
->complete
);
785 /* Tell the OS that we've handled this. */
786 result
= IRQ_HANDLED
;
788 spin_unlock(&cdns_ctrl
->irq_lock
);
793 static void cadence_nand_set_irq_mask(struct cdns_nand_ctrl
*cdns_ctrl
,
794 struct cadence_nand_irq_status
*irq_mask
)
796 writel_relaxed(INTR_ENABLE_INTR_EN
| irq_mask
->status
,
797 cdns_ctrl
->reg
+ INTR_ENABLE
);
799 writel_relaxed(irq_mask
->trd_error
,
800 cdns_ctrl
->reg
+ TRD_ERR_INT_STATUS_EN
);
804 cadence_nand_wait_for_irq(struct cdns_nand_ctrl
*cdns_ctrl
,
805 struct cadence_nand_irq_status
*irq_mask
,
806 struct cadence_nand_irq_status
*irq_status
)
808 unsigned long timeout
= msecs_to_jiffies(10000);
809 unsigned long time_left
;
811 time_left
= wait_for_completion_timeout(&cdns_ctrl
->complete
,
814 *irq_status
= cdns_ctrl
->irq_status
;
815 if (time_left
== 0) {
817 dev_err(cdns_ctrl
->dev
, "timeout occurred:\n");
818 dev_err(cdns_ctrl
->dev
, "\tstatus = 0x%x, mask = 0x%x\n",
819 irq_status
->status
, irq_mask
->status
);
820 dev_err(cdns_ctrl
->dev
,
821 "\ttrd_status = 0x%x, trd_status mask = 0x%x\n",
822 irq_status
->trd_status
, irq_mask
->trd_status
);
823 dev_err(cdns_ctrl
->dev
,
824 "\t trd_error = 0x%x, trd_error mask = 0x%x\n",
825 irq_status
->trd_error
, irq_mask
->trd_error
);
829 /* Execute generic command on NAND controller. */
830 static int cadence_nand_generic_cmd_send(struct cdns_nand_ctrl
*cdns_ctrl
,
834 u32 mini_ctrl_cmd_l
, mini_ctrl_cmd_h
, reg
;
836 mini_ctrl_cmd
|= FIELD_PREP(GCMD_LAY_CS
, chip_nr
);
837 mini_ctrl_cmd_l
= mini_ctrl_cmd
& 0xFFFFFFFF;
838 mini_ctrl_cmd_h
= mini_ctrl_cmd
>> 32;
840 if (cadence_nand_wait_for_value(cdns_ctrl
, CTRL_STATUS
,
842 CTRL_STATUS_CTRL_BUSY
, true))
845 cadence_nand_reset_irq(cdns_ctrl
);
847 writel_relaxed(mini_ctrl_cmd_l
, cdns_ctrl
->reg
+ CMD_REG2
);
848 writel_relaxed(mini_ctrl_cmd_h
, cdns_ctrl
->reg
+ CMD_REG3
);
850 /* Select generic command. */
851 reg
= FIELD_PREP(CMD_REG0_CT
, CMD_REG0_CT_GEN
);
853 reg
|= FIELD_PREP(CMD_REG0_TN
, 0);
856 writel_relaxed(reg
, cdns_ctrl
->reg
+ CMD_REG0
);
861 /* Wait for data on slave DMA interface. */
862 static int cadence_nand_wait_on_sdma(struct cdns_nand_ctrl
*cdns_ctrl
,
866 struct cadence_nand_irq_status irq_mask
, irq_status
;
868 irq_mask
.trd_status
= 0;
869 irq_mask
.trd_error
= 0;
870 irq_mask
.status
= INTR_STATUS_SDMA_TRIGG
871 | INTR_STATUS_SDMA_ERR
872 | INTR_STATUS_UNSUPP_CMD
;
874 cadence_nand_set_irq_mask(cdns_ctrl
, &irq_mask
);
875 cadence_nand_wait_for_irq(cdns_ctrl
, &irq_mask
, &irq_status
);
876 if (irq_status
.status
== 0) {
877 dev_err(cdns_ctrl
->dev
, "Timeout while waiting for SDMA\n");
881 if (irq_status
.status
& INTR_STATUS_SDMA_TRIGG
) {
882 *out_sdma_size
= readl_relaxed(cdns_ctrl
->reg
+ SDMA_SIZE
);
883 *out_sdma_trd
= readl_relaxed(cdns_ctrl
->reg
+ SDMA_TRD_NUM
);
885 FIELD_GET(SDMA_TRD_NUM_SDMA_TRD
, *out_sdma_trd
);
887 dev_err(cdns_ctrl
->dev
, "SDMA error - irq_status %x\n",
895 static void cadence_nand_get_caps(struct cdns_nand_ctrl
*cdns_ctrl
)
899 reg
= readl_relaxed(cdns_ctrl
->reg
+ CTRL_FEATURES
);
901 cdns_ctrl
->caps2
.max_banks
= 1 << FIELD_GET(CTRL_FEATURES_N_BANKS
, reg
);
903 if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64
, reg
))
904 cdns_ctrl
->caps2
.data_dma_width
= 8;
906 cdns_ctrl
->caps2
.data_dma_width
= 4;
908 if (reg
& CTRL_FEATURES_CONTROL_DATA
)
909 cdns_ctrl
->caps2
.data_control_supp
= true;
911 if (reg
& (CTRL_FEATURES_NVDDR_2_3
912 | CTRL_FEATURES_NVDDR
))
913 cdns_ctrl
->caps2
.is_phy_type_dll
= true;
916 /* Prepare CDMA descriptor. */
918 cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl
*cdns_ctrl
,
919 char nf_mem
, u32 flash_ptr
, dma_addr_t mem_ptr
,
920 dma_addr_t ctrl_data_ptr
, u16 ctype
)
922 struct cadence_nand_cdma_desc
*cdma_desc
= cdns_ctrl
->cdma_desc
;
924 memset(cdma_desc
, 0, sizeof(struct cadence_nand_cdma_desc
));
926 /* Set fields for one descriptor. */
927 cdma_desc
->flash_pointer
= flash_ptr
;
928 if (cdns_ctrl
->ctrl_rev
>= 13)
929 cdma_desc
->bank
= nf_mem
;
931 cdma_desc
->flash_pointer
|= (nf_mem
<< CDMA_CFPTR_MEM_SHIFT
);
933 cdma_desc
->command_flags
|= CDMA_CF_DMA_MASTER
;
934 cdma_desc
->command_flags
|= CDMA_CF_INT
;
936 cdma_desc
->memory_pointer
= mem_ptr
;
937 cdma_desc
->status
= 0;
938 cdma_desc
->sync_flag_pointer
= 0;
939 cdma_desc
->sync_arguments
= 0;
941 cdma_desc
->command_type
= ctype
;
942 cdma_desc
->ctrl_data_ptr
= ctrl_data_ptr
;
945 static u8
cadence_nand_check_desc_error(struct cdns_nand_ctrl
*cdns_ctrl
,
948 if (desc_status
& CDMA_CS_ERP
)
951 if (desc_status
& CDMA_CS_UNCE
)
952 return STAT_ECC_UNCORR
;
954 if (desc_status
& CDMA_CS_ERR
) {
955 dev_err(cdns_ctrl
->dev
, ":CDMA desc error flag detected.\n");
959 if (FIELD_GET(CDMA_CS_MAXERR
, desc_status
))
960 return STAT_ECC_CORR
;
965 static int cadence_nand_cdma_finish(struct cdns_nand_ctrl
*cdns_ctrl
)
967 struct cadence_nand_cdma_desc
*desc_ptr
= cdns_ctrl
->cdma_desc
;
968 u8 status
= STAT_BUSY
;
970 if (desc_ptr
->status
& CDMA_CS_FAIL
) {
971 status
= cadence_nand_check_desc_error(cdns_ctrl
,
973 dev_err(cdns_ctrl
->dev
, ":CDMA error %x\n", desc_ptr
->status
);
974 } else if (desc_ptr
->status
& CDMA_CS_COMP
) {
975 /* Descriptor finished with no errors. */
976 if (desc_ptr
->command_flags
& CDMA_CF_CONT
) {
977 dev_info(cdns_ctrl
->dev
, "DMA unsupported flag is set");
978 status
= STAT_UNKNOWN
;
980 /* Last descriptor. */
988 static int cadence_nand_cdma_send(struct cdns_nand_ctrl
*cdns_ctrl
,
994 /* Wait for thread ready. */
995 status
= cadence_nand_wait_for_value(cdns_ctrl
, TRD_STATUS
,
1001 cadence_nand_reset_irq(cdns_ctrl
);
1002 reinit_completion(&cdns_ctrl
->complete
);
1004 writel_relaxed((u32
)cdns_ctrl
->dma_cdma_desc
,
1005 cdns_ctrl
->reg
+ CMD_REG2
);
1006 writel_relaxed(0, cdns_ctrl
->reg
+ CMD_REG3
);
1008 /* Select CDMA mode. */
1009 reg
= FIELD_PREP(CMD_REG0_CT
, CMD_REG0_CT_CDMA
);
1010 /* Thread number. */
1011 reg
|= FIELD_PREP(CMD_REG0_TN
, thread
);
1012 /* Issue command. */
1013 writel_relaxed(reg
, cdns_ctrl
->reg
+ CMD_REG0
);
1018 /* Send CDMA command and wait for finish. */
1020 cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl
*cdns_ctrl
,
1023 struct cadence_nand_irq_status irq_mask
, irq_status
= {0};
1026 irq_mask
.trd_status
= BIT(thread
);
1027 irq_mask
.trd_error
= BIT(thread
);
1028 irq_mask
.status
= INTR_STATUS_CDMA_TERR
;
1030 cadence_nand_set_irq_mask(cdns_ctrl
, &irq_mask
);
1032 status
= cadence_nand_cdma_send(cdns_ctrl
, thread
);
1036 cadence_nand_wait_for_irq(cdns_ctrl
, &irq_mask
, &irq_status
);
1038 if (irq_status
.status
== 0 && irq_status
.trd_status
== 0 &&
1039 irq_status
.trd_error
== 0) {
1040 dev_err(cdns_ctrl
->dev
, "CDMA command timeout\n");
1043 if (irq_status
.status
& irq_mask
.status
) {
1044 dev_err(cdns_ctrl
->dev
, "CDMA command failed\n");
1052 * ECC size depends on the configured ECC strength and on the maximum supported ECC step size.
1055 static int cadence_nand_calc_ecc_bytes(int max_step_size
, int strength
)
1057 int nbytes
= DIV_ROUND_UP(fls(8 * max_step_size
) * strength
, 8);
1059 return ALIGN(nbytes
, 2);
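/*
 * Worked example: for a 1024-byte ECC step and strength 8, fls(8 * 1024)
 * is 14, so 14 * 8 = 112 parity bits are needed, i.e. 14 bytes;
 * ALIGN(14, 2) keeps the even-byte requirement and returns 14.
 */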
1062 #define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \
1064 cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \
1067 return cadence_nand_calc_ecc_bytes(max_step_size, strength);\
1070 CADENCE_NAND_CALC_ECC_BYTES(256)
1071 CADENCE_NAND_CALC_ECC_BYTES(512)
1072 CADENCE_NAND_CALC_ECC_BYTES(1024)
1073 CADENCE_NAND_CALC_ECC_BYTES(2048)
1074 CADENCE_NAND_CALC_ECC_BYTES(4096)
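/*
 * Each expansion above generates a calc_ecc_bytes callback bound to one of
 * the supported ECC step sizes; cadence_nand_read_bch_caps() selects the
 * callback matching the largest step size reported by the BCH engine.
 */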
1076 /* Function reads BCH capabilities. */
1077 static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl
*cdns_ctrl
)
1079 struct nand_ecc_caps
*ecc_caps
= &cdns_ctrl
->ecc_caps
;
1080 int max_step_size
= 0, nstrengths
, i
;
1083 reg
= readl_relaxed(cdns_ctrl
->reg
+ BCH_CFG_3
);
1084 cdns_ctrl
->bch_metadata_size
= FIELD_GET(BCH_CFG_3_METADATA_SIZE
, reg
);
1085 if (cdns_ctrl
->bch_metadata_size
< 4) {
1086 dev_err(cdns_ctrl
->dev
,
1087 "Driver needs at least 4 bytes of BCH meta data\n");
1091 reg
= readl_relaxed(cdns_ctrl
->reg
+ BCH_CFG_0
);
1092 cdns_ctrl
->ecc_strengths
[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0
, reg
);
1093 cdns_ctrl
->ecc_strengths
[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1
, reg
);
1094 cdns_ctrl
->ecc_strengths
[2] = FIELD_GET(BCH_CFG_0_CORR_CAP_2
, reg
);
1095 cdns_ctrl
->ecc_strengths
[3] = FIELD_GET(BCH_CFG_0_CORR_CAP_3
, reg
);
1097 reg
= readl_relaxed(cdns_ctrl
->reg
+ BCH_CFG_1
);
1098 cdns_ctrl
->ecc_strengths
[4] = FIELD_GET(BCH_CFG_1_CORR_CAP_4
, reg
);
1099 cdns_ctrl
->ecc_strengths
[5] = FIELD_GET(BCH_CFG_1_CORR_CAP_5
, reg
);
1100 cdns_ctrl
->ecc_strengths
[6] = FIELD_GET(BCH_CFG_1_CORR_CAP_6
, reg
);
1101 cdns_ctrl
->ecc_strengths
[7] = FIELD_GET(BCH_CFG_1_CORR_CAP_7
, reg
);
1103 reg
= readl_relaxed(cdns_ctrl
->reg
+ BCH_CFG_2
);
1104 cdns_ctrl
->ecc_stepinfos
[0].stepsize
=
1105 FIELD_GET(BCH_CFG_2_SECT_0
, reg
);
1107 cdns_ctrl
->ecc_stepinfos
[1].stepsize
=
1108 FIELD_GET(BCH_CFG_2_SECT_1
, reg
);
1111 for (i
= 0; i
< BCH_MAX_NUM_CORR_CAPS
; i
++) {
1112 if (cdns_ctrl
->ecc_strengths
[i
] != 0)
1116 ecc_caps
->nstepinfos
= 0;
1117 for (i
= 0; i
< BCH_MAX_NUM_SECTOR_SIZES
; i
++) {
1118 /* ECC strengths are common for all step infos. */
1119 cdns_ctrl
->ecc_stepinfos
[i
].nstrengths
= nstrengths
;
1120 cdns_ctrl
->ecc_stepinfos
[i
].strengths
=
1121 cdns_ctrl
->ecc_strengths
;
1123 if (cdns_ctrl
->ecc_stepinfos
[i
].stepsize
!= 0)
1124 ecc_caps
->nstepinfos
++;
1126 if (cdns_ctrl
->ecc_stepinfos
[i
].stepsize
> max_step_size
)
1127 max_step_size
= cdns_ctrl
->ecc_stepinfos
[i
].stepsize
;
1129 ecc_caps
->stepinfos
= &cdns_ctrl
->ecc_stepinfos
[0];
1131 switch (max_step_size
) {
1133 ecc_caps
->calc_ecc_bytes
= &cadence_nand_calc_ecc_bytes_256
;
1136 ecc_caps
->calc_ecc_bytes
= &cadence_nand_calc_ecc_bytes_512
;
1139 ecc_caps
->calc_ecc_bytes
= &cadence_nand_calc_ecc_bytes_1024
;
1142 ecc_caps
->calc_ecc_bytes
= &cadence_nand_calc_ecc_bytes_2048
;
1145 ecc_caps
->calc_ecc_bytes
= &cadence_nand_calc_ecc_bytes_4096
;
1148 dev_err(cdns_ctrl
->dev
,
1149 "Unsupported sector size(ecc step size) %d\n",
1157 /* Hardware initialization. */
1158 static int cadence_nand_hw_init(struct cdns_nand_ctrl
*cdns_ctrl
)
1163 status
= cadence_nand_wait_for_value(cdns_ctrl
, CTRL_STATUS
,
1165 CTRL_STATUS_INIT_COMP
, false);
1169 reg
= readl_relaxed(cdns_ctrl
->reg
+ CTRL_VERSION
);
1170 cdns_ctrl
->ctrl_rev
= FIELD_GET(CTRL_VERSION_REV
, reg
);
1172 dev_info(cdns_ctrl
->dev
,
1173 "%s: cadence nand controller version reg %x\n",
1176 /* Disable cache and multiplane. */
1177 writel_relaxed(0, cdns_ctrl
->reg
+ MULTIPLANE_CFG
);
1178 writel_relaxed(0, cdns_ctrl
->reg
+ CACHE_CFG
);
1180 /* Clear all interrupts. */
1181 writel_relaxed(0xFFFFFFFF, cdns_ctrl
->reg
+ INTR_STATUS
);
1183 cadence_nand_get_caps(cdns_ctrl
);
1184 if (cadence_nand_read_bch_caps(cdns_ctrl
))
1188 * Set I/O width access to 8 bit.
1189 * This is because during SW device discovery the width access
1190 * is expected to be 8 bit.
1192 status
= cadence_nand_set_access_width16(cdns_ctrl
, false);
1197 #define TT_MAIN_OOB_AREAS 2
1198 #define TT_RAW_PAGE 3
1200 #define TT_MAIN_OOB_AREA_EXT 5
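/*
 * Transfer types, roughly: TT_MAIN_OOB_AREAS streams main data and OOB
 * through the same sector stream, TT_MAIN_OOB_AREA_EXT moves the OOB bytes
 * over the separate control-data channel, TT_RAW_PAGE transfers the whole
 * page (main + OOB) as a single sector, and TT_BBM touches only the bad
 * block marker area.
 */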
1202 /* Prepare size of data to transfer. */
1204 cadence_nand_prepare_data_size(struct nand_chip
*chip
,
1207 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1208 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
1209 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1210 u32 sec_size
= 0, offset
= 0, sec_cnt
= 1;
1211 u32 last_sec_size
= cdns_chip
->sector_size
;
1212 u32 data_ctrl_size
= 0;
1215 if (cdns_ctrl
->curr_trans_type
== transfer_type
)
1218 switch (transfer_type
) {
1219 case TT_MAIN_OOB_AREA_EXT
:
1220 sec_cnt
= cdns_chip
->sector_count
;
1221 sec_size
= cdns_chip
->sector_size
;
1222 data_ctrl_size
= cdns_chip
->avail_oob_size
;
1224 case TT_MAIN_OOB_AREAS
:
1225 sec_cnt
= cdns_chip
->sector_count
;
1226 last_sec_size
= cdns_chip
->sector_size
1227 + cdns_chip
->avail_oob_size
;
1228 sec_size
= cdns_chip
->sector_size
;
1231 last_sec_size
= mtd
->writesize
+ mtd
->oobsize
;
1234 offset
= mtd
->writesize
+ cdns_chip
->bbm_offs
;
1240 reg
|= FIELD_PREP(TRAN_CFG_0_OFFSET
, offset
);
1241 reg
|= FIELD_PREP(TRAN_CFG_0_SEC_CNT
, sec_cnt
);
1242 writel_relaxed(reg
, cdns_ctrl
->reg
+ TRAN_CFG_0
);
1245 reg
|= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE
, last_sec_size
);
1246 reg
|= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE
, sec_size
);
1247 writel_relaxed(reg
, cdns_ctrl
->reg
+ TRAN_CFG_1
);
1249 if (cdns_ctrl
->caps2
.data_control_supp
) {
1250 reg
= readl_relaxed(cdns_ctrl
->reg
+ CONTROL_DATA_CTRL
);
1251 reg
&= ~CONTROL_DATA_CTRL_SIZE
;
1252 reg
|= FIELD_PREP(CONTROL_DATA_CTRL_SIZE
, data_ctrl_size
);
1253 writel_relaxed(reg
, cdns_ctrl
->reg
+ CONTROL_DATA_CTRL
);
1256 cdns_ctrl
->curr_trans_type
= transfer_type
;
1260 cadence_nand_cdma_transfer(struct cdns_nand_ctrl
*cdns_ctrl
, u8 chip_nr
,
1261 int page
, void *buf
, void *ctrl_dat
, u32 buf_size
,
1262 u32 ctrl_dat_size
, enum dma_data_direction dir
,
1265 dma_addr_t dma_buf
, dma_ctrl_dat
= 0;
1266 u8 thread_nr
= chip_nr
;
1270 if (dir
== DMA_FROM_DEVICE
)
1275 cadence_nand_set_ecc_enable(cdns_ctrl
, with_ecc
);
1277 dma_buf
= dma_map_single(cdns_ctrl
->dev
, buf
, buf_size
, dir
);
1278 if (dma_mapping_error(cdns_ctrl
->dev
, dma_buf
)) {
1279 dev_err(cdns_ctrl
->dev
, "Failed to map DMA buffer\n");
1283 if (ctrl_dat
&& ctrl_dat_size
) {
1284 dma_ctrl_dat
= dma_map_single(cdns_ctrl
->dev
, ctrl_dat
,
1285 ctrl_dat_size
, dir
);
1286 if (dma_mapping_error(cdns_ctrl
->dev
, dma_ctrl_dat
)) {
1287 dma_unmap_single(cdns_ctrl
->dev
, dma_buf
,
1289 dev_err(cdns_ctrl
->dev
, "Failed to map DMA buffer\n");
1294 cadence_nand_cdma_desc_prepare(cdns_ctrl
, chip_nr
, page
,
1295 dma_buf
, dma_ctrl_dat
, ctype
);
1297 status
= cadence_nand_cdma_send_and_wait(cdns_ctrl
, thread_nr
);
1299 dma_unmap_single(cdns_ctrl
->dev
, dma_buf
,
1302 if (ctrl_dat
&& ctrl_dat_size
)
1303 dma_unmap_single(cdns_ctrl
->dev
, dma_ctrl_dat
,
1304 ctrl_dat_size
, dir
);
1308 return cadence_nand_cdma_finish(cdns_ctrl
);
1311 static void cadence_nand_set_timings(struct cdns_nand_ctrl
*cdns_ctrl
,
1312 struct cadence_nand_timings
*t
)
1314 writel_relaxed(t
->async_toggle_timings
,
1315 cdns_ctrl
->reg
+ ASYNC_TOGGLE_TIMINGS
);
1316 writel_relaxed(t
->timings0
, cdns_ctrl
->reg
+ TIMINGS0
);
1317 writel_relaxed(t
->timings1
, cdns_ctrl
->reg
+ TIMINGS1
);
1318 writel_relaxed(t
->timings2
, cdns_ctrl
->reg
+ TIMINGS2
);
1320 if (cdns_ctrl
->caps2
.is_phy_type_dll
)
1321 writel_relaxed(t
->dll_phy_ctrl
, cdns_ctrl
->reg
+ DLL_PHY_CTRL
);
1323 writel_relaxed(t
->phy_ctrl
, cdns_ctrl
->reg
+ PHY_CTRL
);
1325 if (cdns_ctrl
->caps2
.is_phy_type_dll
) {
1326 writel_relaxed(0, cdns_ctrl
->reg
+ PHY_TSEL
);
1327 writel_relaxed(2, cdns_ctrl
->reg
+ PHY_DQ_TIMING
);
1328 writel_relaxed(t
->phy_dqs_timing
,
1329 cdns_ctrl
->reg
+ PHY_DQS_TIMING
);
1330 writel_relaxed(t
->phy_gate_lpbk_ctrl
,
1331 cdns_ctrl
->reg
+ PHY_GATE_LPBK_CTRL
);
1332 writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE
,
1333 cdns_ctrl
->reg
+ PHY_DLL_MASTER_CTRL
);
1334 writel_relaxed(0, cdns_ctrl
->reg
+ PHY_DLL_SLAVE_CTRL
);
1338 static int cadence_nand_select_target(struct nand_chip
*chip
)
1340 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1341 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
1343 if (chip
== cdns_ctrl
->selected_chip
)
1346 if (cadence_nand_wait_for_value(cdns_ctrl
, CTRL_STATUS
,
1348 CTRL_STATUS_CTRL_BUSY
, true))
1351 cadence_nand_set_timings(cdns_ctrl
, &cdns_chip
->timings
);
1353 cadence_nand_set_ecc_strength(cdns_ctrl
,
1354 cdns_chip
->corr_str_idx
);
1356 cadence_nand_set_erase_detection(cdns_ctrl
, true,
1357 chip
->ecc
.strength
);
1359 cdns_ctrl
->curr_trans_type
= -1;
1360 cdns_ctrl
->selected_chip
= chip
;
1365 static int cadence_nand_erase(struct nand_chip
*chip
, u32 page
)
1367 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1368 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
1370 u8 thread_nr
= cdns_chip
->cs
[chip
->cur_cs
];
1372 cadence_nand_cdma_desc_prepare(cdns_ctrl
,
1373 cdns_chip
->cs
[chip
->cur_cs
],
1376 status
= cadence_nand_cdma_send_and_wait(cdns_ctrl
, thread_nr
);
1378 dev_err(cdns_ctrl
->dev
, "erase operation failed\n");
1382 status
= cadence_nand_cdma_finish(cdns_ctrl
);
1389 static int cadence_nand_read_bbm(struct nand_chip
*chip
, int page
, u8
*buf
)
1392 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1393 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
1394 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1396 cadence_nand_prepare_data_size(chip
, TT_BBM
);
1398 cadence_nand_set_skip_bytes_conf(cdns_ctrl
, 0, 0, 0);
1401 * Read only the bad block marker from the offset
1402 * defined by the memory manufacturer.
1404 status
= cadence_nand_cdma_transfer(cdns_ctrl
,
1405 cdns_chip
->cs
[chip
->cur_cs
],
1406 page
, cdns_ctrl
->buf
, NULL
,
1408 0, DMA_FROM_DEVICE
, false);
1410 dev_err(cdns_ctrl
->dev
, "read BBM failed\n");
1414 memcpy(buf
+ cdns_chip
->bbm_offs
, cdns_ctrl
->buf
, cdns_chip
->bbm_len
);
1419 static int cadence_nand_write_page(struct nand_chip
*chip
,
1420 const u8
*buf
, int oob_required
,
1423 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1424 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
1425 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1427 u16 marker_val
= 0xFFFF;
1429 status
= cadence_nand_select_target(chip
);
1433 cadence_nand_set_skip_bytes_conf(cdns_ctrl
, cdns_chip
->bbm_len
,
1435 + cdns_chip
->bbm_offs
,
1439 marker_val
= *(u16
*)(chip
->oob_poi
1440 + cdns_chip
->bbm_offs
);
1442 /* Set oob data to 0xFF. */
1443 memset(cdns_ctrl
->buf
+ mtd
->writesize
, 0xFF,
1444 cdns_chip
->avail_oob_size
);
1447 cadence_nand_set_skip_marker_val(cdns_ctrl
, marker_val
);
1449 cadence_nand_prepare_data_size(chip
, TT_MAIN_OOB_AREA_EXT
);
1451 if (cadence_nand_dma_buf_ok(cdns_ctrl
, buf
, mtd
->writesize
) &&
1452 cdns_ctrl
->caps2
.data_control_supp
) {
1456 oob
= chip
->oob_poi
;
1458 oob
= cdns_ctrl
->buf
+ mtd
->writesize
;
1460 status
= cadence_nand_cdma_transfer(cdns_ctrl
,
1461 cdns_chip
->cs
[chip
->cur_cs
],
1462 page
, (void *)buf
, oob
,
1464 cdns_chip
->avail_oob_size
,
1465 DMA_TO_DEVICE
, true);
1467 dev_err(cdns_ctrl
->dev
, "write page failed\n");
1475 /* Transfer the data to the oob area. */
1476 memcpy(cdns_ctrl
->buf
+ mtd
->writesize
, chip
->oob_poi
,
1477 cdns_chip
->avail_oob_size
);
1480 memcpy(cdns_ctrl
->buf
, buf
, mtd
->writesize
);
1482 cadence_nand_prepare_data_size(chip
, TT_MAIN_OOB_AREAS
);
1484 return cadence_nand_cdma_transfer(cdns_ctrl
,
1485 cdns_chip
->cs
[chip
->cur_cs
],
1486 page
, cdns_ctrl
->buf
, NULL
,
1488 + cdns_chip
->avail_oob_size
,
1489 0, DMA_TO_DEVICE
, true);
1492 static int cadence_nand_write_oob(struct nand_chip
*chip
, int page
)
1494 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1495 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1497 memset(cdns_ctrl
->buf
, 0xFF, mtd
->writesize
);
1499 return cadence_nand_write_page(chip
, cdns_ctrl
->buf
, 1, page
);
1502 static int cadence_nand_write_page_raw(struct nand_chip
*chip
,
1503 const u8
*buf
, int oob_required
,
1506 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1507 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
1508 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1509 int writesize
= mtd
->writesize
;
1510 int oobsize
= mtd
->oobsize
;
1511 int ecc_steps
= chip
->ecc
.steps
;
1512 int ecc_size
= chip
->ecc
.size
;
1513 int ecc_bytes
= chip
->ecc
.bytes
;
1514 void *tmp_buf
= cdns_ctrl
->buf
;
1515 int oob_skip
= cdns_chip
->bbm_len
;
1516 size_t size
= writesize
+ oobsize
;
1520 status
= cadence_nand_select_target(chip
);
1525 * Fill the buffer with 0xff first, except for a full page transfer.
1526 * This simplifies the logic.
1528 if (!buf
|| !oob_required
)
1529 memset(tmp_buf
, 0xff, size
);
1531 cadence_nand_set_skip_bytes_conf(cdns_ctrl
, 0, 0, 0);
1533 /* Arrange the buffer for syndrome payload/ecc layout. */
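/*
 * On flash the page is laid out as sector_count pairs of (payload, ECC
 * bytes), with the BBM kept at the start of the OOB area and the user OOB
 * bytes placed after the last payload sector, so the page buffer and
 * oob_poi have to be re-shuffled into that layout before programming.
 */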
1535 for (i
= 0; i
< ecc_steps
; i
++) {
1536 pos
= i
* (ecc_size
+ ecc_bytes
);
1539 if (pos
>= writesize
)
1541 else if (pos
+ len
> writesize
)
1542 len
= writesize
- pos
;
1544 memcpy(tmp_buf
+ pos
, buf
, len
);
1546 if (len
< ecc_size
) {
1547 len
= ecc_size
- len
;
1548 memcpy(tmp_buf
+ writesize
+ oob_skip
, buf
,
1556 const u8
*oob
= chip
->oob_poi
;
1557 u32 oob_data_offset
= (cdns_chip
->sector_count
- 1) *
1558 (cdns_chip
->sector_size
+ chip
->ecc
.bytes
)
1559 + cdns_chip
->sector_size
+ oob_skip
;
1561 /* BBM at the beginning of the OOB area. */
1562 memcpy(tmp_buf
+ writesize
, oob
, oob_skip
);
1565 memcpy(tmp_buf
+ oob_data_offset
, oob
,
1566 cdns_chip
->avail_oob_size
);
1567 oob
+= cdns_chip
->avail_oob_size
;
1570 for (i
= 0; i
< ecc_steps
; i
++) {
1571 pos
= ecc_size
+ i
* (ecc_size
+ ecc_bytes
);
1572 if (i
== (ecc_steps
- 1))
1573 pos
+= cdns_chip
->avail_oob_size
;
1577 if (pos
>= writesize
)
1579 else if (pos
+ len
> writesize
)
1580 len
= writesize
- pos
;
1582 memcpy(tmp_buf
+ pos
, oob
, len
);
1584 if (len
< ecc_bytes
) {
1585 len
= ecc_bytes
- len
;
1586 memcpy(tmp_buf
+ writesize
+ oob_skip
, oob
,
1593 cadence_nand_prepare_data_size(chip
, TT_RAW_PAGE
);
1595 return cadence_nand_cdma_transfer(cdns_ctrl
,
1596 cdns_chip
->cs
[chip
->cur_cs
],
1597 page
, cdns_ctrl
->buf
, NULL
,
1600 0, DMA_TO_DEVICE
, false);
1603 static int cadence_nand_write_oob_raw(struct nand_chip
*chip
,
1606 return cadence_nand_write_page_raw(chip
, NULL
, true, page
);
1609 static int cadence_nand_read_page(struct nand_chip
*chip
,
1610 u8
*buf
, int oob_required
, int page
)
1612 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1613 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
1614 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1616 int ecc_err_count
= 0;
1618 status
= cadence_nand_select_target(chip
);
1622 cadence_nand_set_skip_bytes_conf(cdns_ctrl
, cdns_chip
->bbm_len
,
1624 + cdns_chip
->bbm_offs
, 1);
1627 * If the data buffer can be accessed by DMA and the data_control feature
1628 * is supported, then transfer data and OOB directly.
1630 if (cadence_nand_dma_buf_ok(cdns_ctrl
, buf
, mtd
->writesize
) &&
1631 cdns_ctrl
->caps2
.data_control_supp
) {
1635 oob
= chip
->oob_poi
;
1637 oob
= cdns_ctrl
->buf
+ mtd
->writesize
;
1639 cadence_nand_prepare_data_size(chip
, TT_MAIN_OOB_AREA_EXT
);
1640 status
= cadence_nand_cdma_transfer(cdns_ctrl
,
1641 cdns_chip
->cs
[chip
->cur_cs
],
1644 cdns_chip
->avail_oob_size
,
1645 DMA_FROM_DEVICE
, true);
1646 /* Otherwise use bounce buffer. */
1648 cadence_nand_prepare_data_size(chip
, TT_MAIN_OOB_AREAS
);
1649 status
= cadence_nand_cdma_transfer(cdns_ctrl
,
1650 cdns_chip
->cs
[chip
->cur_cs
],
1651 page
, cdns_ctrl
->buf
,
1652 NULL
, mtd
->writesize
1653 + cdns_chip
->avail_oob_size
,
1654 0, DMA_FROM_DEVICE
, true);
1656 memcpy(buf
, cdns_ctrl
->buf
, mtd
->writesize
);
1658 memcpy(chip
->oob_poi
,
1659 cdns_ctrl
->buf
+ mtd
->writesize
,
1664 case STAT_ECC_UNCORR
:
1665 mtd
->ecc_stats
.failed
++;
1669 ecc_err_count
= FIELD_GET(CDMA_CS_MAXERR
,
1670 cdns_ctrl
->cdma_desc
->status
);
1671 mtd
->ecc_stats
.corrected
+= ecc_err_count
;
1677 dev_err(cdns_ctrl
->dev
, "read page failed\n");
1682 if (cadence_nand_read_bbm(chip
, page
, chip
->oob_poi
))
1685 return ecc_err_count
;
1688 /* Reads OOB data from the device. */
1689 static int cadence_nand_read_oob(struct nand_chip
*chip
, int page
)
1691 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1693 return cadence_nand_read_page(chip
, cdns_ctrl
->buf
, 1, page
);
1696 static int cadence_nand_read_page_raw(struct nand_chip
*chip
,
1697 u8
*buf
, int oob_required
, int page
)
1699 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1700 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
1701 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1702 int oob_skip
= cdns_chip
->bbm_len
;
1703 int writesize
= mtd
->writesize
;
1704 int ecc_steps
= chip
->ecc
.steps
;
1705 int ecc_size
= chip
->ecc
.size
;
1706 int ecc_bytes
= chip
->ecc
.bytes
;
1707 void *tmp_buf
= cdns_ctrl
->buf
;
1711 status
= cadence_nand_select_target(chip
);
1715 cadence_nand_set_skip_bytes_conf(cdns_ctrl
, 0, 0, 0);
1717 cadence_nand_prepare_data_size(chip
, TT_RAW_PAGE
);
1718 status
= cadence_nand_cdma_transfer(cdns_ctrl
,
1719 cdns_chip
->cs
[chip
->cur_cs
],
1720 page
, cdns_ctrl
->buf
, NULL
,
1723 0, DMA_FROM_DEVICE
, false);
1730 dev_err(cdns_ctrl
->dev
, "read raw page failed\n");
1734 /* Arrange the buffer for syndrome payload/ecc layout. */
1736 for (i
= 0; i
< ecc_steps
; i
++) {
1737 pos
= i
* (ecc_size
+ ecc_bytes
);
1740 if (pos
>= writesize
)
1742 else if (pos
+ len
> writesize
)
1743 len
= writesize
- pos
;
1745 memcpy(buf
, tmp_buf
+ pos
, len
);
1747 if (len
< ecc_size
) {
1748 len
= ecc_size
- len
;
1749 memcpy(buf
, tmp_buf
+ writesize
+ oob_skip
,
1757 u8
*oob
= chip
->oob_poi
;
1758 u32 oob_data_offset
= (cdns_chip
->sector_count
- 1) *
1759 (cdns_chip
->sector_size
+ chip
->ecc
.bytes
)
1760 + cdns_chip
->sector_size
+ oob_skip
;
1763 memcpy(oob
, tmp_buf
+ oob_data_offset
,
1764 cdns_chip
->avail_oob_size
);
1766 /* BBM at the beginning of the OOB area. */
1767 memcpy(oob
, tmp_buf
+ writesize
, oob_skip
);
1769 oob
+= cdns_chip
->avail_oob_size
;
1772 for (i
= 0; i
< ecc_steps
; i
++) {
1773 pos
= ecc_size
+ i
* (ecc_size
+ ecc_bytes
);
1776 if (i
== (ecc_steps
- 1))
1777 pos
+= cdns_chip
->avail_oob_size
;
1779 if (pos
>= writesize
)
1781 else if (pos
+ len
> writesize
)
1782 len
= writesize
- pos
;
1784 memcpy(oob
, tmp_buf
+ pos
, len
);
1786 if (len
< ecc_bytes
) {
1787 len
= ecc_bytes
- len
;
1788 memcpy(oob
, tmp_buf
+ writesize
+ oob_skip
,
1798 static int cadence_nand_read_oob_raw(struct nand_chip
*chip
,
1801 return cadence_nand_read_page_raw(chip
, NULL
, true, page
);
1804 static void cadence_nand_slave_dma_transfer_finished(void *data
)
1806 struct completion
*finished
= data
;
1811 static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl
*cdns_ctrl
,
1813 dma_addr_t dev_dma
, size_t len
,
1814 enum dma_data_direction dir
)
1816 DECLARE_COMPLETION_ONSTACK(finished
);
1817 struct dma_chan
*chan
;
1818 struct dma_device
*dma_dev
;
1819 dma_addr_t src_dma
, dst_dma
, buf_dma
;
1820 struct dma_async_tx_descriptor
*tx
;
1821 dma_cookie_t cookie
;
1823 chan
= cdns_ctrl
->dmac
;
1824 dma_dev
= chan
->device
;
1826 buf_dma
= dma_map_single(dma_dev
->dev
, buf
, len
, dir
);
1827 if (dma_mapping_error(dma_dev
->dev
, buf_dma
)) {
1828 dev_err(cdns_ctrl
->dev
, "Failed to map DMA buffer\n");
1832 if (dir
== DMA_FROM_DEVICE
) {
1833 src_dma
= cdns_ctrl
->io
.dma
;
1837 dst_dma
= cdns_ctrl
->io
.dma
;
1840 tx
= dmaengine_prep_dma_memcpy(cdns_ctrl
->dmac
, dst_dma
, src_dma
, len
,
1841 DMA_CTRL_ACK
| DMA_PREP_INTERRUPT
);
1843 dev_err(cdns_ctrl
->dev
, "Failed to prepare DMA memcpy\n");
1847 tx
->callback
= cadence_nand_slave_dma_transfer_finished
;
1848 tx
->callback_param
= &finished
;
1850 cookie
= dmaengine_submit(tx
);
1851 if (dma_submit_error(cookie
)) {
1852 dev_err(cdns_ctrl
->dev
, "Failed to do DMA tx_submit\n");
1856 dma_async_issue_pending(cdns_ctrl
->dmac
);
1857 wait_for_completion(&finished
);
1859 dma_unmap_single(cdns_ctrl
->dev
, buf_dma
, len
, dir
);
1864 dma_unmap_single(cdns_ctrl
->dev
, buf_dma
, len
, dir
);
1867 dev_dbg(cdns_ctrl
->dev
, "Fall back to CPU I/O\n");
1872 static int cadence_nand_read_buf(struct cdns_nand_ctrl
*cdns_ctrl
,
1879 /* Wait until the slave DMA interface is ready for data transfer. */
1880 status
= cadence_nand_wait_on_sdma(cdns_ctrl
, &thread_nr
, &sdma_size
);
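/*
 * Three paths are possible from here: 32-bit PIO over the slave DMA window
 * when no DMA engine is available, a direct zero-copy slave DMA transfer
 * when the caller's buffer satisfies cadence_nand_dma_buf_ok(), or a
 * transfer through the driver's bounce buffer otherwise.
 */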
1884 if (!cdns_ctrl
->caps1
->has_dma
) {
1885 int len_in_words
= len
>> 2;
1887 /* Read the 32-bit-aligned part of the data. */
1888 ioread32_rep(cdns_ctrl
->io
.virt
, buf
, len_in_words
);
1889 if (sdma_size
> len
) {
1890 /* Read the remaining data from the slave DMA interface, if any. */
1891 ioread32_rep(cdns_ctrl
->io
.virt
, cdns_ctrl
->buf
,
1892 sdma_size
/ 4 - len_in_words
);
1893 /* copy rest of data */
1894 memcpy(buf
+ (len_in_words
<< 2), cdns_ctrl
->buf
,
1895 len
- (len_in_words
<< 2));
1900 if (cadence_nand_dma_buf_ok(cdns_ctrl
, buf
, len
)) {
1901 status
= cadence_nand_slave_dma_transfer(cdns_ctrl
, buf
,
1903 len
, DMA_FROM_DEVICE
);
1907 dev_warn(cdns_ctrl
->dev
,
1908 "Slave DMA transfer failed. Try again using bounce buffer.");
1911 /* If DMA transfer is not possible or failed then use bounce buffer. */
1912 status
= cadence_nand_slave_dma_transfer(cdns_ctrl
, cdns_ctrl
->buf
,
1914 sdma_size
, DMA_FROM_DEVICE
);
1917 dev_err(cdns_ctrl
->dev
, "Slave DMA transfer failed");
1921 memcpy(buf
, cdns_ctrl
->buf
, len
);
1926 static int cadence_nand_write_buf(struct cdns_nand_ctrl
*cdns_ctrl
,
1927 const u8
*buf
, int len
)
1933 /* Wait until the slave DMA interface is ready for data transfer. */
1934 status
= cadence_nand_wait_on_sdma(cdns_ctrl
, &thread_nr
, &sdma_size
);
1938 if (!cdns_ctrl
->caps1
->has_dma
) {
1939 int len_in_words
= len
>> 2;
1941 iowrite32_rep(cdns_ctrl
->io
.virt
, buf
, len_in_words
);
1942 if (sdma_size
> len
) {
1943 /* copy rest of data */
1944 memcpy(cdns_ctrl
->buf
, buf
+ (len_in_words
<< 2),
1945 len
- (len_in_words
<< 2));
1946 /* Write all data expected by the NAND controller. */
1947 iowrite32_rep(cdns_ctrl
->io
.virt
, cdns_ctrl
->buf
,
1948 sdma_size
/ 4 - len_in_words
);
1954 if (cadence_nand_dma_buf_ok(cdns_ctrl
, buf
, len
)) {
1955 status
= cadence_nand_slave_dma_transfer(cdns_ctrl
, (void *)buf
,
1957 len
, DMA_TO_DEVICE
);
1961 dev_warn(cdns_ctrl
->dev
,
1962 "Slave DMA transfer failed. Try again using bounce buffer.");
1965 /* If DMA transfer is not possible or failed then use bounce buffer. */
1966 memcpy(cdns_ctrl
->buf
, buf
, len
);
1968 status
= cadence_nand_slave_dma_transfer(cdns_ctrl
, cdns_ctrl
->buf
,
1970 sdma_size
, DMA_TO_DEVICE
);
1973 dev_err(cdns_ctrl
->dev
, "Slave DMA transfer failed");
1978 static int cadence_nand_force_byte_access(struct nand_chip
*chip
,
1981 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1985 * Callers of this function do not verify if the NAND is using a 16-bit
1986 * or an 8-bit bus for normal operations, so we need to take care of that
1987 * here by leaving the configuration unchanged if the NAND does not have
1988 * the NAND_BUSWIDTH_16 flag set.
1990 if (!(chip
->options
& NAND_BUSWIDTH_16
))
1993 status
= cadence_nand_set_access_width16(cdns_ctrl
, !force_8bit
);
1998 static int cadence_nand_cmd_opcode(struct nand_chip
*chip
,
1999 const struct nand_subop
*subop
)
2001 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
2002 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
2003 const struct nand_op_instr
*instr
;
2004 unsigned int op_id
= 0;
2005 u64 mini_ctrl_cmd
= 0;
2008 instr
= &subop
->instrs
[op_id
];
2010 if (instr
->delay_ns
> 0)
2011 mini_ctrl_cmd
|= GCMD_LAY_TWB
;
2013 mini_ctrl_cmd
|= FIELD_PREP(GCMD_LAY_INSTR
,
2014 GCMD_LAY_INSTR_CMD
);
2015 mini_ctrl_cmd
|= FIELD_PREP(GCMD_LAY_INPUT_CMD
,
2016 instr
->ctx
.cmd
.opcode
);
2018 ret
= cadence_nand_generic_cmd_send(cdns_ctrl
,
2019 cdns_chip
->cs
[chip
->cur_cs
],
2022 dev_err(cdns_ctrl
->dev
, "send cmd %x failed\n",
2023 instr
->ctx
.cmd
.opcode
);
2028 static int cadence_nand_cmd_address(struct nand_chip
*chip
,
2029 const struct nand_subop
*subop
)
2031 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
2032 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
2033 const struct nand_op_instr
*instr
;
2034 unsigned int op_id
= 0;
2035 u64 mini_ctrl_cmd
= 0;
2036 unsigned int offset
, naddrs
;
2042 instr
= &subop
->instrs
[op_id
];
2044 if (instr
->delay_ns
> 0)
2045 mini_ctrl_cmd
|= GCMD_LAY_TWB
;
2047 mini_ctrl_cmd
|= FIELD_PREP(GCMD_LAY_INSTR
,
2048 GCMD_LAY_INSTR_ADDR
);
2050 offset
= nand_subop_get_addr_start_off(subop
, op_id
);
2051 naddrs
= nand_subop_get_num_addr_cyc(subop
, op_id
);
2052 addrs
= &instr
->ctx
.addr
.addrs
[offset
];
2054 for (i
= 0; i
< naddrs
; i
++)
2055 address
|= (u64
)addrs
[i
] << (8 * i
);
2057 mini_ctrl_cmd
|= FIELD_PREP(GCMD_LAY_INPUT_ADDR
,
2059 mini_ctrl_cmd
|= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE
,
2062 ret
= cadence_nand_generic_cmd_send(cdns_ctrl
,
2063 cdns_chip
->cs
[chip
->cur_cs
],
2066 dev_err(cdns_ctrl
->dev
, "send address %llx failed\n", address
);
2071 static int cadence_nand_cmd_erase(struct nand_chip
*chip
,
2072 const struct nand_subop
*subop
)
2076 if (subop
->instrs
[0].ctx
.cmd
.opcode
== NAND_CMD_ERASE1
) {
2078 const struct nand_op_instr
*instr
= NULL
;
2079 unsigned int offset
, naddrs
;
2083 instr
= &subop
->instrs
[1];
2084 offset
= nand_subop_get_addr_start_off(subop
, 1);
2085 naddrs
= nand_subop_get_num_addr_cyc(subop
, 1);
2086 addrs
= &instr
->ctx
.addr
.addrs
[offset
];
2088 for (i
= 0; i
< naddrs
; i
++)
2089 page
|= (u32
)addrs
[i
] << (8 * i
);
2091 return cadence_nand_erase(chip
, page
);
2095 * If it is not an erase operation, then handle the operation
2096 * by calling the exec_op function.
2098 for (op_id
= 0; op_id
< subop
->ninstrs
; op_id
++) {
2100 const struct nand_operation nand_op
= {
2102 .instrs
= &subop
->instrs
[op_id
],
2104 ret
= chip
->controller
->ops
->exec_op(chip
, &nand_op
, false);
2112 static int cadence_nand_cmd_data(struct nand_chip
*chip
,
2113 const struct nand_subop
*subop
)
2115 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
2116 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
2117 const struct nand_op_instr
*instr
;
2118 unsigned int offset
, op_id
= 0;
2119 u64 mini_ctrl_cmd
= 0;
2123 instr
= &subop
->instrs
[op_id
];
2125 if (instr
->delay_ns
> 0)
2126 mini_ctrl_cmd
|= GCMD_LAY_TWB
;
2128 mini_ctrl_cmd
|= FIELD_PREP(GCMD_LAY_INSTR
,
2129 GCMD_LAY_INSTR_DATA
);
2131 if (instr
->type
== NAND_OP_DATA_OUT_INSTR
)
2132 mini_ctrl_cmd
|= FIELD_PREP(GCMD_DIR
,
2135 len
= nand_subop_get_data_len(subop
, op_id
);
2136 offset
= nand_subop_get_data_start_off(subop
, op_id
);
2137 mini_ctrl_cmd
|= FIELD_PREP(GCMD_SECT_CNT
, 1);
2138 mini_ctrl_cmd
|= FIELD_PREP(GCMD_LAST_SIZE
, len
);
2139 if (instr
->ctx
.data
.force_8bit
) {
2140 ret
= cadence_nand_force_byte_access(chip
, true);
2142 dev_err(cdns_ctrl
->dev
,
2143 "cannot change byte access generic data cmd failed\n");
2148 ret
= cadence_nand_generic_cmd_send(cdns_ctrl
,
2149 cdns_chip
->cs
[chip
->cur_cs
],
2152 dev_err(cdns_ctrl
->dev
, "send generic data cmd failed\n");
2156 if (instr
->type
== NAND_OP_DATA_IN_INSTR
) {
2157 void *buf
= instr
->ctx
.data
.buf
.in
+ offset
;
2159 ret
= cadence_nand_read_buf(cdns_ctrl
, buf
, len
);
2161 const void *buf
= instr
->ctx
.data
.buf
.out
+ offset
;
2163 ret
= cadence_nand_write_buf(cdns_ctrl
, buf
, len
);
2167 dev_err(cdns_ctrl
->dev
, "data transfer failed for generic command\n");
2171 if (instr
->ctx
.data
.force_8bit
) {
2172 ret
= cadence_nand_force_byte_access(chip
, false);
2174 dev_err(cdns_ctrl
->dev
,
2175 "cannot change byte access generic data cmd failed\n");
static int cadence_nand_cmd_waitrdy(struct nand_chip *chip,
				    const struct nand_subop *subop)
{
	int status;
	unsigned int op_id = 0;
	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	const struct nand_op_instr *instr = &subop->instrs[op_id];
	u32 timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;

	status = cadence_nand_wait_for_value(cdns_ctrl, RBN_SETINGS,
					     timeout_us,
					     BIT(cdns_chip->cs[chip->cur_cs]),
					     false);
	return status;
}
static const struct nand_op_parser cadence_nand_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(
		cadence_nand_cmd_erase,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ERASE_ADDRESS_CYC),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		cadence_nand_cmd_opcode,
		NAND_OP_PARSER_PAT_CMD_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		cadence_nand_cmd_address,
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC)),
	NAND_OP_PARSER_PATTERN(
		cadence_nand_cmd_data,
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_DATA_SIZE)),
	NAND_OP_PARSER_PATTERN(
		cadence_nand_cmd_data,
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE)),
	NAND_OP_PARSER_PATTERN(
		cadence_nand_cmd_waitrdy,
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false))
	);
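
/*
 * nand_op_parser_exec_op() tries the patterns above in order, so a full
 * erase sequence (CMD + ADDR + CMD + WAITRDY) is routed as a whole to
 * cadence_nand_cmd_erase() before the generic single-element patterns are
 * considered.
 */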
static int cadence_nand_exec_op(struct nand_chip *chip,
				const struct nand_operation *op,
				bool check_only)
{
	if (!check_only) {
		int status = cadence_nand_select_target(chip);

		if (status)
			return status;
	}

	return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op,
				      check_only);
}
static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);

	if (section)
		return -ERANGE;

	oobregion->offset = cdns_chip->bbm_len;
	oobregion->length = cdns_chip->avail_oob_size
		- cdns_chip->bbm_len;

	return 0;
}
static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);

	if (section)
		return -ERANGE;

	oobregion->offset = cdns_chip->avail_oob_size;
	oobregion->length = chip->ecc.total;

	return 0;
}
static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = {
	.free = cadence_nand_ooblayout_free,
	.ecc = cadence_nand_ooblayout_ecc,
};
static int calc_cycl(u32 timing, u32 clock)
{
	if (timing == 0 || clock == 0)
		return 0;

	if ((timing % clock) > 0)
		return timing / clock;
	else
		return timing / clock - 1;
}
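
/*
 * Worked example with illustrative numbers: for timing = 70000 ps and
 * clock = 20000 ps the remainder is non-zero, so calc_cycl() returns
 * 70000 / 20000 = 3. The callers use the result as "cycles - 1", e.g.
 * (trp_cnt + 1) * clk_period below, so this corresponds to 4 clock cycles
 * (80000 ps), which covers the requested 70000 ps.
 */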
/* Calculate max data valid window. */
static inline u32 calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
				u32 board_delay_skew_min, u32 ext_mode)
{
	if (ext_mode == 0)
		clk_period /= 2;

	return (trp_cnt + 1) * clk_period + trhoh_min +
		board_delay_skew_min;
}
/* Calculate data valid window. */
static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
			    u32 trea_max, u32 ext_mode)
{
	if (ext_mode == 0)
		clk_period /= 2;

	return (trp_cnt + 1) * clk_period + trhoh_min - trea_max;
}
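
/*
 * Illustrative example (numbers are made up, not taken from a datasheet):
 * with trp_cnt = 0, clk_period = 50000 ps, tRHOH_min = 15000 ps and
 * tREA_max = 30000 ps in extended read mode, the data valid window is
 * 50000 + 15000 - 30000 = 35000 ps; calc_tdvw_max() instead adds the
 * minimum board delay skew, giving the latest usable sampling instant.
 */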
static int
cadence_nand_setup_interface(struct nand_chip *chip, int chipnr,
			     const struct nand_interface_config *conf)
{
	const struct nand_sdr_timings *sdr;
	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	struct cadence_nand_timings *t = &cdns_chip->timings;
	u32 reg;
	u32 board_delay = cdns_ctrl->board_delay;
	u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL,
					    cdns_ctrl->nf_clk_rate);
	u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt;
	u32 tfeat_cnt, trhz_cnt, tvdly_cnt;
	u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt;
	u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0;
	u32 if_skew = cdns_ctrl->caps1->if_skew;
	u32 board_delay_skew_min = board_delay - if_skew;
	u32 board_delay_skew_max = board_delay + if_skew;
	u32 dqs_sampl_res, phony_dqs_mod;
	u32 tdvw, tdvw_min, tdvw_max;
	u32 ext_rd_mode, ext_wr_mode;
	u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0;
	u32 sampling_point;

	sdr = nand_get_sdr_timings(conf);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	memset(t, 0, sizeof(*t));
	/* Sampling point calculation. */
	if (cdns_ctrl->caps2.is_phy_type_dll)
		phony_dqs_mod = 2;
	else
		phony_dqs_mod = 1;

	dqs_sampl_res = clk_period / phony_dqs_mod;

	tdvw_min = sdr->tREA_max + board_delay_skew_max;
	/*
	 * The idea of this calculation is to get optimal values for the
	 * tRP and tRH timings. If it is NOT possible to sample data with
	 * the optimal tRP/tRH settings, the parameters will be extended.
	 * If clk_period is 50ns (the lowest value), this condition is met
	 * for asynchronous timing modes 1, 2, 3, 4 and 5.
	 * If clk_period is 20ns, the condition is met only for
	 * asynchronous timing mode 5.
	 */
	if (sdr->tRC_min <= clk_period &&
	    sdr->tRP_min <= (clk_period / 2) &&
	    sdr->tREH_min <= (clk_period / 2)) {
		/* Performance mode. */
		ext_rd_mode = 0;
		tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
				 sdr->tREA_max, ext_rd_mode);
		tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min,
					 board_delay_skew_min,
					 ext_rd_mode);

		/*
		 * Check if the data valid window and sampling point can be
		 * found and are not on the edge (i.e. we have hold margin).
		 * If not, extend the tRP timing.
		 */
		if (tdvw > 0) {
			if (tdvw_max <= tdvw_min ||
			    (tdvw_max % dqs_sampl_res) == 0) {
				/*
				 * No valid sampling point, so the RE pulse
				 * needs to be widened by half a clock cycle.
				 */
				ext_rd_mode = 1;
			}
		} else {
			/*
			 * There is no valid window in which to sample data,
			 * so tRP needs to be widened.
			 * Very safe calculations are performed here.
			 */
			trp_cnt = (sdr->tREA_max + board_delay_skew_max
				   + dqs_sampl_res) / clk_period;
			ext_rd_mode = 1;
		}
	} else {
		/* Extended read mode. */
		u32 trh;

		ext_rd_mode = 1;
		trp_cnt = calc_cycl(sdr->tRP_min, clk_period);
		trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period);
		if (sdr->tREH_min >= trh)
			trh_cnt = calc_cycl(sdr->tREH_min, clk_period);
		else
			trh_cnt = calc_cycl(trh, clk_period);

		tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
				 sdr->tREA_max, ext_rd_mode);
		/*
		 * Check if the data valid window and sampling point can be
		 * found, or, if the window sits on the edge, whether the
		 * previous sampling point is valid. If not, extend the tRP
		 * timing.
		 */
		if (tdvw > 0) {
			tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
						 sdr->tRHOH_min,
						 board_delay_skew_min,
						 ext_rd_mode);

			if ((((tdvw_max / dqs_sampl_res)
			      * dqs_sampl_res) <= tdvw_min) ||
			    (((tdvw_max % dqs_sampl_res) == 0) &&
			     (((tdvw_max / dqs_sampl_res - 1)
			       * dqs_sampl_res) <= tdvw_min))) {
				/*
				 * The data valid window is narrower than the
				 * sampling resolution and does not hit any
				 * sampling point. To make sure a sampling
				 * point will be found, extend the RE low
				 * pulse width by one clock cycle.
				 */
				trp_cnt = trp_cnt + 1;
			}
		} else {
			/*
			 * There is no valid window in which to sample data,
			 * so tRP needs to be widened.
			 * Very safe calculations are performed here.
			 */
			trp_cnt = (sdr->tREA_max + board_delay_skew_max
				   + dqs_sampl_res) / clk_period;
		}
	}

	tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
				 sdr->tRHOH_min,
				 board_delay_skew_min, ext_rd_mode);

	if (sdr->tWC_min <= clk_period &&
	    (sdr->tWP_min + if_skew) <= (clk_period / 2) &&
	    (sdr->tWH_min + if_skew) <= (clk_period / 2)) {
		ext_wr_mode = 0;
	} else {
		u32 twh;

		ext_wr_mode = 1;
		twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period);
		if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew))
			twp_cnt = calc_cycl(sdr->tALS_min + if_skew,
					    clk_period);

		twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period);
		if (sdr->tWH_min >= twh)
			twh = sdr->tWH_min;

		twh_cnt = calc_cycl(twh + if_skew, clk_period);
	}

	reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt);
	reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt);
	reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt);
	reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt);
	t->async_toggle_timings = reg;
	dev_dbg(cdns_ctrl->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg);

	tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period);
	tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period);
	twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period);
	trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period);
	reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);

	/*
	 * If the timing exceeds the delay field in the timing register,
	 * use the maximum value.
	 */
	if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt))
		reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
	else
		reg |= TIMINGS0_TCCS;

	reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
	reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
	t->timings0 = reg;
	dev_dbg(cdns_ctrl->dev, "TIMINGS0_SDR\t%x\n", reg);

	/* The following is related to a single signal so skew is not needed. */
	trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period);
	trhz_cnt = trhz_cnt + 1;
	twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period);
	/*
	 * Because of the two-stage syncflop the value must be increased by 3;
	 * the first value is related to the synchronization, the second to
	 * the output interface delay.
	 */
	twb_cnt = twb_cnt + 3 + 5;
	/*
	 * The following is related to the WE edge of the random data input
	 * sequence, so skew is not needed.
	 */
	tvdly_cnt = calc_cycl(500000 + if_skew, clk_period);
	reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
	reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt);
	reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
	t->timings1 = reg;
	dev_dbg(cdns_ctrl->dev, "TIMINGS1_SDR\t%x\n", reg);

	tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period);
	if (tfeat_cnt < twb_cnt)
		tfeat_cnt = twb_cnt;

	tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period);
	tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period);

	reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
	reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
	reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
	t->timings2 = reg;
	dev_dbg(cdns_ctrl->dev, "TIMINGS2_SDR\t%x\n", reg);

	if (cdns_ctrl->caps2.is_phy_type_dll) {
		reg = DLL_PHY_CTRL_DLL_RST_N;
		if (ext_wr_mode)
			reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE;
		if (ext_rd_mode)
			reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE;

		reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7);
		reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7);
		t->dll_phy_ctrl = reg;
		dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg);
	}

	/* Sampling point calculation. */
	if ((tdvw_max % dqs_sampl_res) > 0)
		sampling_point = tdvw_max / dqs_sampl_res;
	else
		sampling_point = (tdvw_max / dqs_sampl_res - 1);

	if (sampling_point * dqs_sampl_res > tdvw_min) {
		dll_phy_dqs_timing =
			FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4);
		dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS;
		phony_dqs_timing = sampling_point / phony_dqs_mod;

		if ((sampling_point % 2) > 0) {
			dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL;
			if ((tdvw_max % dqs_sampl_res) == 0)
				/*
				 * Calculation for the sampling point lying at
				 * the edge of data and being an odd number.
				 */
				phony_dqs_timing = (tdvw_max / dqs_sampl_res)
					/ phony_dqs_mod - 1;

			if (!cdns_ctrl->caps2.is_phy_type_dll)
				phony_dqs_timing--;
		} else {
			phony_dqs_timing--;
		}
		rd_del_sel = phony_dqs_timing + 3;
	} else {
		dev_warn(cdns_ctrl->dev,
			 "ERROR : cannot find valid sampling point\n");
	}

	reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing);
	if (cdns_ctrl->caps2.is_phy_type_dll)
		reg |= PHY_CTRL_SDR_DQS;
	t->phy_ctrl = reg;
	dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_SDR\t%x\n", reg);

	if (cdns_ctrl->caps2.is_phy_type_dll) {
		dev_dbg(cdns_ctrl->dev, "PHY_TSEL_REG_SDR\t%x\n", 0);
		dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2);
		dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n",
			dll_phy_dqs_timing);
		t->phy_dqs_timing = dll_phy_dqs_timing;

		reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel);
		dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n",
			reg);
		t->phy_gate_lpbk_ctrl = reg;

		dev_dbg(cdns_ctrl->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n",
			PHY_DLL_MASTER_CTRL_BYPASS_MODE);
		dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0);
	}

	return 0;
}
static int cadence_nand_attach_chip(struct nand_chip *chip)
{
	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	u32 ecc_size;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	if (chip->options & NAND_BUSWIDTH_16) {
		ret = cadence_nand_set_access_width16(cdns_ctrl, true);
		if (ret)
			return ret;
	}

	chip->bbt_options |= NAND_BBT_USE_FLASH;
	chip->bbt_options |= NAND_BBT_NO_OOB;
	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	chip->options |= NAND_NO_SUBPAGE_WRITE;

	cdns_chip->bbm_offs = chip->badblockpos;
	cdns_chip->bbm_offs &= ~0x01;
	/* This value should be an even number. */
	cdns_chip->bbm_len = 2;

	ret = nand_ecc_choose_conf(chip,
				   &cdns_ctrl->ecc_caps,
				   mtd->oobsize - cdns_chip->bbm_len);
	if (ret) {
		dev_err(cdns_ctrl->dev, "ECC configuration failed\n");
		return ret;
	}

	dev_dbg(cdns_ctrl->dev,
		"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
		chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);

	/* Error correction configuration. */
	cdns_chip->sector_size = chip->ecc.size;
	cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
	ecc_size = cdns_chip->sector_count * chip->ecc.bytes;

	cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;

	if (cdns_chip->avail_oob_size > cdns_ctrl->bch_metadata_size)
		cdns_chip->avail_oob_size = cdns_ctrl->bch_metadata_size;

	if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
	    > mtd->oobsize)
		cdns_chip->avail_oob_size -= 4;

	ret = cadence_nand_get_ecc_strength_idx(cdns_ctrl, chip->ecc.strength);
	if (ret < 0)
		return -EINVAL;

	cdns_chip->corr_str_idx = (u8)ret;

	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
					1000000,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	cadence_nand_set_ecc_strength(cdns_ctrl,
				      cdns_chip->corr_str_idx);

	cadence_nand_set_erase_detection(cdns_ctrl, true,
					 chip->ecc.strength);

	/* Override the default read operations. */
	chip->ecc.read_page = cadence_nand_read_page;
	chip->ecc.read_page_raw = cadence_nand_read_page_raw;
	chip->ecc.write_page = cadence_nand_write_page;
	chip->ecc.write_page_raw = cadence_nand_write_page_raw;
	chip->ecc.read_oob = cadence_nand_read_oob;
	chip->ecc.write_oob = cadence_nand_write_oob;
	chip->ecc.read_oob_raw = cadence_nand_read_oob_raw;
	chip->ecc.write_oob_raw = cadence_nand_write_oob_raw;

	if ((mtd->writesize + mtd->oobsize) > cdns_ctrl->buf_size)
		cdns_ctrl->buf_size = mtd->writesize + mtd->oobsize;

	/* Is 32-bit DMA supported? */
	ret = dma_set_mask(cdns_ctrl->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(cdns_ctrl->dev, "no usable DMA configuration\n");
		return ret;
	}

	mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops);

	return 0;
}
static const struct nand_controller_ops cadence_nand_controller_ops = {
	.attach_chip = cadence_nand_attach_chip,
	.exec_op = cadence_nand_exec_op,
	.setup_interface = cadence_nand_setup_interface,
};
static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
				  struct device_node *np)
{
	struct cdns_nand_chip *cdns_chip;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int nsels, ret, i;
	u32 cs;

	nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
	if (nsels <= 0) {
		dev_err(cdns_ctrl->dev, "missing/invalid reg property\n");
		return -EINVAL;
	}

	/* Allocate the nand chip structure. */
	cdns_chip = devm_kzalloc(cdns_ctrl->dev, sizeof(*cdns_chip) +
				 (nsels * sizeof(u8)),
				 GFP_KERNEL);
	if (!cdns_chip) {
		dev_err(cdns_ctrl->dev, "could not allocate chip structure\n");
		return -ENOMEM;
	}

	cdns_chip->nsels = nsels;

	for (i = 0; i < nsels; i++) {
		/* Retrieve CS id. */
		ret = of_property_read_u32_index(np, "reg", i, &cs);
		if (ret) {
			dev_err(cdns_ctrl->dev,
				"could not retrieve reg property: %d\n",
				ret);
			return ret;
		}

		if (cs >= cdns_ctrl->caps2.max_banks) {
			dev_err(cdns_ctrl->dev,
				"invalid reg value: %u (max CS = %d)\n",
				cs, cdns_ctrl->caps2.max_banks);
			return -EINVAL;
		}

		if (test_and_set_bit(cs, &cdns_ctrl->assigned_cs)) {
			dev_err(cdns_ctrl->dev,
				"CS %d already assigned\n", cs);
			return -EINVAL;
		}

		cdns_chip->cs[i] = cs;
	}

	chip = &cdns_chip->chip;
	chip->controller = &cdns_ctrl->controller;
	nand_set_flash_node(chip, np);

	mtd = nand_to_mtd(chip);
	mtd->dev.parent = cdns_ctrl->dev;

	/*
	 * Default to HW ECC engine mode. If the nand-ecc-mode property is
	 * given in the DT node, this entry will be overwritten in
	 * nand_scan_ident().
	 */
	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	ret = nand_scan(chip, cdns_chip->nsels);
	if (ret) {
		dev_err(cdns_ctrl->dev, "could not scan the nand chip\n");
		return ret;
	}

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(cdns_ctrl->dev,
			"failed to register mtd device: %d\n", ret);
		nand_cleanup(chip);
		return ret;
	}

	list_add_tail(&cdns_chip->node, &cdns_ctrl->chips);

	return 0;
}
static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
{
	struct cdns_nand_chip *entry, *temp;
	struct nand_chip *chip;
	int ret;

	list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) {
		chip = &entry->chip;
		ret = mtd_device_unregister(nand_to_mtd(chip));
		WARN_ON(ret);
		nand_cleanup(chip);
		list_del(&entry->node);
	}
}
static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
{
	struct device_node *np = cdns_ctrl->dev->of_node;
	struct device_node *nand_np;
	int max_cs = cdns_ctrl->caps2.max_banks;
	int nchips, ret;

	nchips = of_get_child_count(np);

	if (nchips > max_cs) {
		dev_err(cdns_ctrl->dev,
			"too many NAND chips: %d (max = %d CS)\n",
			nchips, max_cs);
		return -EINVAL;
	}

	for_each_child_of_node(np, nand_np) {
		ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
		if (ret) {
			of_node_put(nand_np);
			cadence_nand_chips_cleanup(cdns_ctrl);
			return ret;
		}
	}

	return 0;
}
static void
cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
{
	/* Disable interrupts. */
	writel_relaxed(INTR_ENABLE_INTR_EN, cdns_ctrl->reg + INTR_ENABLE);
}
static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
{
	dma_cap_mask_t mask;
	int ret;

	cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
						  sizeof(*cdns_ctrl->cdma_desc),
						  &cdns_ctrl->dma_cdma_desc,
						  GFP_KERNEL);
	if (!cdns_ctrl->dma_cdma_desc)
		return -ENOMEM;

	cdns_ctrl->buf_size = SZ_16K;
	cdns_ctrl->buf = kmalloc(cdns_ctrl->buf_size, GFP_KERNEL);
	if (!cdns_ctrl->buf) {
		ret = -ENOMEM;
		goto free_buf_desc;
	}

	if (devm_request_irq(cdns_ctrl->dev, cdns_ctrl->irq, cadence_nand_isr,
			     IRQF_SHARED, "cadence-nand-controller",
			     cdns_ctrl)) {
		dev_err(cdns_ctrl->dev, "Unable to allocate IRQ\n");
		ret = -ENODEV;
		goto free_buf;
	}

	spin_lock_init(&cdns_ctrl->irq_lock);
	init_completion(&cdns_ctrl->complete);

	ret = cadence_nand_hw_init(cdns_ctrl);
	if (ret)
		goto disable_irq;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	if (cdns_ctrl->caps1->has_dma) {
		cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
		if (!cdns_ctrl->dmac) {
			dev_err(cdns_ctrl->dev,
				"Unable to get a DMA channel\n");
			ret = -EBUSY;
			goto disable_irq;
		}
	}

	nand_controller_init(&cdns_ctrl->controller);
	INIT_LIST_HEAD(&cdns_ctrl->chips);

	cdns_ctrl->controller.ops = &cadence_nand_controller_ops;
	cdns_ctrl->curr_corr_str_idx = 0xFF;

	ret = cadence_nand_chips_init(cdns_ctrl);
	if (ret) {
		dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
			ret);
		goto dma_release_chnl;
	}

	/*
	 * The chips have been scanned, so buf_size now reflects the largest
	 * page + OOB seen in attach_chip(); reallocate the bounce buffer at
	 * its final size.
	 */
	kfree(cdns_ctrl->buf);
	cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
	if (!cdns_ctrl->buf) {
		ret = -ENOMEM;
		goto dma_release_chnl;
	}

	return 0;

dma_release_chnl:
	if (cdns_ctrl->dmac)
		dma_release_channel(cdns_ctrl->dmac);

disable_irq:
	cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);

free_buf:
	kfree(cdns_ctrl->buf);

free_buf_desc:
	dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
			  cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);

	return ret;
}
/* Driver exit point. */
static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
{
	cadence_nand_chips_cleanup(cdns_ctrl);
	cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
	kfree(cdns_ctrl->buf);
	dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
			  cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);

	if (cdns_ctrl->dmac)
		dma_release_channel(cdns_ctrl->dmac);
}
struct cadence_nand_dt {
	struct cdns_nand_ctrl cdns_ctrl;
	struct clk *clk;
};

static const struct cadence_nand_dt_devdata cadence_nand_default = {
	.if_skew = 0,
	.has_dma = 1,
};

static const struct of_device_id cadence_nand_dt_ids[] = {
	{
		.compatible = "cdns,hp-nfc",
		.data = &cadence_nand_default
	}, {}
};

MODULE_DEVICE_TABLE(of, cadence_nand_dt_ids);
static int cadence_nand_dt_probe(struct platform_device *ofdev)
{
	struct resource *res;
	struct cadence_nand_dt *dt;
	struct cdns_nand_ctrl *cdns_ctrl;
	int ret;
	const struct of_device_id *of_id;
	const struct cadence_nand_dt_devdata *devdata;
	u32 val;

	of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev);
	if (of_id) {
		ofdev->id_entry = of_id->data;
		devdata = of_id->data;
	} else {
		pr_err("Failed to find the right device id.\n");
		return -ENOMEM;
	}

	dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
	if (!dt)
		return -ENOMEM;

	cdns_ctrl = &dt->cdns_ctrl;
	cdns_ctrl->caps1 = devdata;

	cdns_ctrl->dev = &ofdev->dev;
	cdns_ctrl->irq = platform_get_irq(ofdev, 0);
	if (cdns_ctrl->irq < 0)
		return cdns_ctrl->irq;

	dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq);

	cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0);
	if (IS_ERR(cdns_ctrl->reg))
		return PTR_ERR(cdns_ctrl->reg);

	res = platform_get_resource(ofdev, IORESOURCE_MEM, 1);
	cdns_ctrl->io.dma = res->start;
	cdns_ctrl->io.virt = devm_ioremap_resource(&ofdev->dev, res);
	if (IS_ERR(cdns_ctrl->io.virt))
		return PTR_ERR(cdns_ctrl->io.virt);

	dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
	if (IS_ERR(dt->clk))
		return PTR_ERR(dt->clk);

	cdns_ctrl->nf_clk_rate = clk_get_rate(dt->clk);

	ret = of_property_read_u32(ofdev->dev.of_node,
				   "cdns,board-delay-ps", &val);
	if (ret) {
		val = 4830; /* Use the default value. */
		dev_info(cdns_ctrl->dev,
			 "missing cdns,board-delay-ps property, %d was set\n",
			 val);
	}
	cdns_ctrl->board_delay = val;

	ret = cadence_nand_init(cdns_ctrl);
	if (ret)
		return ret;

	platform_set_drvdata(ofdev, dt);
	return 0;
}
static int cadence_nand_dt_remove(struct platform_device *ofdev)
{
	struct cadence_nand_dt *dt = platform_get_drvdata(ofdev);

	cadence_nand_remove(&dt->cdns_ctrl);

	return 0;
}
static struct platform_driver cadence_nand_dt_driver = {
	.probe = cadence_nand_dt_probe,
	.remove = cadence_nand_dt_remove,
	.driver = {
		.name = "cadence-nand-controller",
		.of_match_table = cadence_nand_dt_ids,
	},
};

module_platform_driver(cadence_nand_dt_driver);
MODULE_AUTHOR("Piotr Sroka <piotrs@cadence.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Driver for Cadence NAND flash controller");