// SPDX-License-Identifier: GPL-2.0+
/*
 * Cadence NAND flash controller driver
 *
 * Copyright (C) 2019 Cadence
 *
 * Author: Piotr Sroka <piotrs@cadence.com>
 */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of_device.h>
#include <linux/iopoll.h>
/*
 * HPNFC can work in 3 modes:
 * - PIO - can work in master or slave DMA
 * - CDMA - needs master DMA for accessing command descriptors.
 * - Generic mode - can use only slave DMA.
 * CDMA and PIO modes can be used to execute only base commands.
 * Generic mode can be used to execute any command
 * on NAND flash memory. The driver uses CDMA mode for
 * block erasing, page reading and page programming.
 * Generic mode is used for executing the rest of the commands.
 */
#define MAX_OOB_SIZE_PER_SECTOR	32
#define MAX_ADDRESS_CYC		6
#define MAX_ERASE_ADDRESS_CYC	3
#define MAX_DATA_SIZE		0xFFFC
#define DMA_DATA_SIZE_ALIGN	8
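/*
 * Descriptive note (added, not in the original source): MAX_DATA_SIZE
 * bounds a single generic-mode data transfer (see the data patterns of the
 * op parser below), while DMA_DATA_SIZE_ALIGN is the length alignment a
 * buffer must satisfy before it may be handed to the slave DMA interface
 * (see cadence_nand_dma_buf_ok()).
 */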
/* Register definition. */
/*
 * Command register 0.
 * Writing data to this register will initiate a new transaction
 * of the NF controller.
 */
#define CMD_REG0			0x0000
46 /* Command type field mask. */
47 #define CMD_REG0_CT GENMASK(31, 30)
48 /* Command type CDMA. */
49 #define CMD_REG0_CT_CDMA 0uL
50 /* Command type generic. */
51 #define CMD_REG0_CT_GEN 3uL
52 /* Command thread number field mask. */
53 #define CMD_REG0_TN GENMASK(27, 24)
55 /* Command register 2. */
56 #define CMD_REG2 0x0008
57 /* Command register 3. */
58 #define CMD_REG3 0x000C
59 /* Pointer register to select which thread status will be selected. */
60 #define CMD_STATUS_PTR 0x0010
61 /* Command status register for selected thread. */
62 #define CMD_STATUS 0x0014
64 /* Interrupt status register. */
65 #define INTR_STATUS 0x0110
66 #define INTR_STATUS_SDMA_ERR BIT(22)
67 #define INTR_STATUS_SDMA_TRIGG BIT(21)
68 #define INTR_STATUS_UNSUPP_CMD BIT(19)
69 #define INTR_STATUS_DDMA_TERR BIT(18)
70 #define INTR_STATUS_CDMA_TERR BIT(17)
71 #define INTR_STATUS_CDMA_IDL BIT(16)
73 /* Interrupt enable register. */
74 #define INTR_ENABLE 0x0114
75 #define INTR_ENABLE_INTR_EN BIT(31)
76 #define INTR_ENABLE_SDMA_ERR_EN BIT(22)
77 #define INTR_ENABLE_SDMA_TRIGG_EN BIT(21)
78 #define INTR_ENABLE_UNSUPP_CMD_EN BIT(19)
79 #define INTR_ENABLE_DDMA_TERR_EN BIT(18)
80 #define INTR_ENABLE_CDMA_TERR_EN BIT(17)
81 #define INTR_ENABLE_CDMA_IDLE_EN BIT(16)
83 /* Controller internal state. */
84 #define CTRL_STATUS 0x0118
85 #define CTRL_STATUS_INIT_COMP BIT(9)
86 #define CTRL_STATUS_CTRL_BUSY BIT(8)
88 /* Command Engine threads state. */
89 #define TRD_STATUS 0x0120
91 /* Command Engine interrupt thread error status. */
92 #define TRD_ERR_INT_STATUS 0x0128
93 /* Command Engine interrupt thread error enable. */
94 #define TRD_ERR_INT_STATUS_EN 0x0130
95 /* Command Engine interrupt thread complete status. */
96 #define TRD_COMP_INT_STATUS 0x0138
99 * Transfer config 0 register.
100 * Configures data transfer parameters.
102 #define TRAN_CFG_0 0x0400
103 /* Offset value from the beginning of the page. */
104 #define TRAN_CFG_0_OFFSET GENMASK(31, 16)
/* Number of sectors to transfer within a single NF device's page. */
106 #define TRAN_CFG_0_SEC_CNT GENMASK(7, 0)
109 * Transfer config 1 register.
110 * Configures data transfer parameters.
112 #define TRAN_CFG_1 0x0404
113 /* Size of last data sector. */
114 #define TRAN_CFG_1_LAST_SEC_SIZE GENMASK(31, 16)
115 /* Size of not-last data sector. */
116 #define TRAN_CFG_1_SECTOR_SIZE GENMASK(15, 0)
118 /* ECC engine configuration register 0. */
119 #define ECC_CONFIG_0 0x0428
120 /* Correction strength. */
121 #define ECC_CONFIG_0_CORR_STR GENMASK(10, 8)
122 /* Enable erased pages detection mechanism. */
123 #define ECC_CONFIG_0_ERASE_DET_EN BIT(1)
124 /* Enable controller ECC check bits generation and correction. */
125 #define ECC_CONFIG_0_ECC_EN BIT(0)
127 /* ECC engine configuration register 1. */
128 #define ECC_CONFIG_1 0x042C
130 /* Multiplane settings register. */
131 #define MULTIPLANE_CFG 0x0434
132 /* Cache operation settings. */
133 #define CACHE_CFG 0x0438
135 /* DMA settings register. */
136 #define DMA_SETINGS 0x043C
/* Enable SDMA error reporting on access to an unprepared slave DMA interface. */
138 #define DMA_SETINGS_SDMA_ERR_RSP BIT(17)
140 /* Transferred data block size for the slave DMA module. */
141 #define SDMA_SIZE 0x0440
143 /* Thread number associated with transferred data block
144 * for the slave DMA module.
146 #define SDMA_TRD_NUM 0x0444
147 /* Thread number mask. */
148 #define SDMA_TRD_NUM_SDMA_TRD GENMASK(2, 0)
150 #define CONTROL_DATA_CTRL 0x0494
/* Control data size mask. */
152 #define CONTROL_DATA_CTRL_SIZE GENMASK(15, 0)
154 #define CTRL_VERSION 0x800
155 #define CTRL_VERSION_REV GENMASK(7, 0)
157 /* Available hardware features of the controller. */
158 #define CTRL_FEATURES 0x804
159 /* Support for NV-DDR2/3 work mode. */
160 #define CTRL_FEATURES_NVDDR_2_3 BIT(28)
161 /* Support for NV-DDR work mode. */
162 #define CTRL_FEATURES_NVDDR BIT(27)
163 /* Support for asynchronous work mode. */
164 #define CTRL_FEATURES_ASYNC BIT(26)
/* Number of banks supported by the hardware. */
166 #define CTRL_FEATURES_N_BANKS GENMASK(25, 24)
167 /* Slave and Master DMA data width. */
168 #define CTRL_FEATURES_DMA_DWITH64 BIT(21)
169 /* Availability of Control Data feature.*/
170 #define CTRL_FEATURES_CONTROL_DATA BIT(10)
172 /* BCH Engine identification register 0 - correction strengths. */
173 #define BCH_CFG_0 0x838
174 #define BCH_CFG_0_CORR_CAP_0 GENMASK(7, 0)
175 #define BCH_CFG_0_CORR_CAP_1 GENMASK(15, 8)
176 #define BCH_CFG_0_CORR_CAP_2 GENMASK(23, 16)
177 #define BCH_CFG_0_CORR_CAP_3 GENMASK(31, 24)
179 /* BCH Engine identification register 1 - correction strengths. */
180 #define BCH_CFG_1 0x83C
181 #define BCH_CFG_1_CORR_CAP_4 GENMASK(7, 0)
182 #define BCH_CFG_1_CORR_CAP_5 GENMASK(15, 8)
183 #define BCH_CFG_1_CORR_CAP_6 GENMASK(23, 16)
184 #define BCH_CFG_1_CORR_CAP_7 GENMASK(31, 24)
186 /* BCH Engine identification register 2 - sector sizes. */
187 #define BCH_CFG_2 0x840
188 #define BCH_CFG_2_SECT_0 GENMASK(15, 0)
189 #define BCH_CFG_2_SECT_1 GENMASK(31, 16)
191 /* BCH Engine identification register 3. */
192 #define BCH_CFG_3 0x844
194 /* Ready/Busy# line status. */
195 #define RBN_SETINGS 0x1004
197 /* Common settings. */
198 #define COMMON_SET 0x1008
199 /* 16 bit device connected to the NAND Flash interface. */
200 #define COMMON_SET_DEVICE_16BIT BIT(8)
202 /* Skip_bytes registers. */
203 #define SKIP_BYTES_CONF 0x100C
204 #define SKIP_BYTES_MARKER_VALUE GENMASK(31, 16)
205 #define SKIP_BYTES_NUM_OF_BYTES GENMASK(7, 0)
207 #define SKIP_BYTES_OFFSET 0x1010
208 #define SKIP_BYTES_OFFSET_VALUE GENMASK(23, 0)
210 /* Timings configuration. */
211 #define ASYNC_TOGGLE_TIMINGS 0x101c
212 #define ASYNC_TOGGLE_TIMINGS_TRH GENMASK(28, 24)
213 #define ASYNC_TOGGLE_TIMINGS_TRP GENMASK(20, 16)
214 #define ASYNC_TOGGLE_TIMINGS_TWH GENMASK(12, 8)
215 #define ASYNC_TOGGLE_TIMINGS_TWP GENMASK(4, 0)
217 #define TIMINGS0 0x1024
218 #define TIMINGS0_TADL GENMASK(31, 24)
219 #define TIMINGS0_TCCS GENMASK(23, 16)
220 #define TIMINGS0_TWHR GENMASK(15, 8)
221 #define TIMINGS0_TRHW GENMASK(7, 0)
223 #define TIMINGS1 0x1028
224 #define TIMINGS1_TRHZ GENMASK(31, 24)
225 #define TIMINGS1_TWB GENMASK(23, 16)
226 #define TIMINGS1_TVDLY GENMASK(7, 0)
228 #define TIMINGS2 0x102c
229 #define TIMINGS2_TFEAT GENMASK(25, 16)
230 #define TIMINGS2_CS_HOLD_TIME GENMASK(13, 8)
231 #define TIMINGS2_CS_SETUP_TIME GENMASK(5, 0)
233 /* Configuration of the resynchronization of slave DLL of PHY. */
234 #define DLL_PHY_CTRL 0x1034
235 #define DLL_PHY_CTRL_DLL_RST_N BIT(24)
236 #define DLL_PHY_CTRL_EXTENDED_WR_MODE BIT(17)
237 #define DLL_PHY_CTRL_EXTENDED_RD_MODE BIT(16)
238 #define DLL_PHY_CTRL_RS_HIGH_WAIT_CNT GENMASK(11, 8)
239 #define DLL_PHY_CTRL_RS_IDLE_CNT GENMASK(7, 0)
241 /* Register controlling DQ related timing. */
242 #define PHY_DQ_TIMING 0x2000
/* Register controlling DQS related timing. */
244 #define PHY_DQS_TIMING 0x2004
245 #define PHY_DQS_TIMING_DQS_SEL_OE_END GENMASK(3, 0)
246 #define PHY_DQS_TIMING_PHONY_DQS_SEL BIT(16)
247 #define PHY_DQS_TIMING_USE_PHONY_DQS BIT(20)
249 /* Register controlling the gate and loopback control related timing. */
250 #define PHY_GATE_LPBK_CTRL 0x2008
251 #define PHY_GATE_LPBK_CTRL_RDS GENMASK(24, 19)
253 /* Register holds the control for the master DLL logic. */
254 #define PHY_DLL_MASTER_CTRL 0x200C
255 #define PHY_DLL_MASTER_CTRL_BYPASS_MODE BIT(23)
257 /* Register holds the control for the slave DLL logic. */
258 #define PHY_DLL_SLAVE_CTRL 0x2010
260 /* This register handles the global control settings for the PHY. */
261 #define PHY_CTRL 0x2080
262 #define PHY_CTRL_SDR_DQS BIT(14)
263 #define PHY_CTRL_PHONY_DQS GENMASK(9, 4)
266 * This register handles the global control settings
267 * for the termination selects for reads.
269 #define PHY_TSEL 0x2084
271 /* Generic command layout. */
272 #define GCMD_LAY_CS GENMASK_ULL(11, 8)
 * This bit informs the minicontroller if it has to wait for tWB
275 * after sending the last CMD/ADDR/DATA in the sequence.
277 #define GCMD_LAY_TWB BIT_ULL(6)
278 /* Type of generic instruction. */
279 #define GCMD_LAY_INSTR GENMASK_ULL(5, 0)
281 /* Generic CMD sequence type. */
282 #define GCMD_LAY_INSTR_CMD 0
283 /* Generic ADDR sequence type. */
284 #define GCMD_LAY_INSTR_ADDR 1
285 /* Generic data transfer sequence type. */
286 #define GCMD_LAY_INSTR_DATA 2
/* Input part of a generic command when the type of input is a command. */
289 #define GCMD_LAY_INPUT_CMD GENMASK_ULL(23, 16)
291 /* Generic command address sequence - address fields. */
292 #define GCMD_LAY_INPUT_ADDR GENMASK_ULL(63, 16)
293 /* Generic command address sequence - address size. */
294 #define GCMD_LAY_INPUT_ADDR_SIZE GENMASK_ULL(13, 11)
296 /* Transfer direction field of generic command data sequence. */
297 #define GCMD_DIR BIT_ULL(11)
298 /* Read transfer direction of generic command data sequence. */
299 #define GCMD_DIR_READ 0
300 /* Write transfer direction of generic command data sequence. */
301 #define GCMD_DIR_WRITE 1
303 /* ECC enabled flag of generic command data sequence - ECC enabled. */
304 #define GCMD_ECC_EN BIT_ULL(12)
305 /* Generic command data sequence - sector size. */
306 #define GCMD_SECT_SIZE GENMASK_ULL(31, 16)
307 /* Generic command data sequence - sector count. */
308 #define GCMD_SECT_CNT GENMASK_ULL(39, 32)
309 /* Generic command data sequence - last sector size. */
310 #define GCMD_LAST_SIZE GENMASK_ULL(55, 40)
312 /* CDMA descriptor fields. */
313 /* Erase command type of CDMA descriptor. */
314 #define CDMA_CT_ERASE 0x1000
315 /* Program page command type of CDMA descriptor. */
316 #define CDMA_CT_WR 0x2100
317 /* Read page command type of CDMA descriptor. */
318 #define CDMA_CT_RD 0x2200
320 /* Flash pointer memory shift. */
321 #define CDMA_CFPTR_MEM_SHIFT 24
322 /* Flash pointer memory mask. */
323 #define CDMA_CFPTR_MEM GENMASK(26, 24)
 * Command DMA descriptor flags. If set, an interrupt is issued after
 * the completion of descriptor processing.
329 #define CDMA_CF_INT BIT(8)
331 * Command DMA descriptor flags - the next descriptor
332 * address field is valid and descriptor processing should continue.
334 #define CDMA_CF_CONT BIT(9)
335 /* DMA master flag of command DMA descriptor. */
336 #define CDMA_CF_DMA_MASTER BIT(10)
338 /* Operation complete status of command descriptor. */
339 #define CDMA_CS_COMP BIT(15)
341 /* Command descriptor status - operation fail. */
342 #define CDMA_CS_FAIL BIT(14)
343 /* Command descriptor status - page erased. */
344 #define CDMA_CS_ERP BIT(11)
345 /* Command descriptor status - timeout occurred. */
346 #define CDMA_CS_TOUT BIT(10)
348 * Maximum amount of correction applied to one ECC sector.
349 * It is part of command descriptor status.
351 #define CDMA_CS_MAXERR GENMASK(9, 2)
352 /* Command descriptor status - uncorrectable ECC error. */
353 #define CDMA_CS_UNCE BIT(1)
354 /* Command descriptor status - descriptor error. */
355 #define CDMA_CS_ERR BIT(0)
357 /* Status of operation - OK. */
359 /* Status of operation - FAIL. */
361 /* Status of operation - uncorrectable ECC error. */
362 #define STAT_ECC_UNCORR 3
363 /* Status of operation - page erased. */
364 #define STAT_ERASED 5
365 /* Status of operation - correctable ECC error. */
366 #define STAT_ECC_CORR 6
/* Status of operation - unexpected state. */
368 #define STAT_UNKNOWN 7
369 /* Status of operation - operation is not completed yet. */
370 #define STAT_BUSY 0xFF
372 #define BCH_MAX_NUM_CORR_CAPS 8
373 #define BCH_MAX_NUM_SECTOR_SIZES 2
struct cadence_nand_timings {
	u32 async_toggle_timings;
	u32 timings0;
	u32 timings1;
	u32 timings2;
	u32 dll_phy_ctrl;
	u32 phy_ctrl;
	u32 phy_dqs_timing;
	u32 phy_gate_lpbk_ctrl;
};
386 /* Command DMA descriptor. */
387 struct cadence_nand_cdma_desc
{
388 /* Next descriptor address. */
391 /* Flash address is a 32-bit address comprising of BANK and ROW ADDR. */
393 /*field appears in HPNFC version 13*/
397 /* Operation the controller needs to perform. */
400 /* Flags for operation of this command. */
404 /* System/host memory address required for data DMA commands. */
407 /* Status of operation. */
411 /* Address pointer to sync buffer location. */
412 u64 sync_flag_pointer
;
414 /* Controls the buffer sync mechanism. */
418 /* Control data pointer. */
422 /* Interrupt status. */
423 struct cadence_nand_irq_status
{
424 /* Thread operation complete status. */
426 /* Thread operation error. */
428 /* Controller status. */
/* Cadence NAND flash controller capabilities obtained from driver data. */
433 struct cadence_nand_dt_devdata
{
434 /* Skew value of the output signals of the NAND Flash interface. */
/* Indicates whether the slave DMA interface is connected to a DMA engine. */
437 unsigned int has_dma
:1;
440 /* Cadence NAND flash controller capabilities read from registers. */
441 struct cdns_nand_caps
{
442 /* Maximum number of banks supported by hardware. */
444 /* Slave and Master DMA data width in bytes (4 or 8). */
446 /* Control Data feature supported. */
447 bool data_control_supp
;
448 /* Is PHY type DLL. */
449 bool is_phy_type_dll
;
452 struct cdns_nand_ctrl
{
454 struct nand_controller controller
;
455 struct cadence_nand_cdma_desc
*cdma_desc
;
457 const struct cadence_nand_dt_devdata
*caps1
;
458 struct cdns_nand_caps caps2
;
460 dma_addr_t dma_cdma_desc
;
463 u8 curr_corr_str_idx
;
465 /* Register interface. */
474 /* Interrupts that have happened. */
475 struct cadence_nand_irq_status irq_status
;
476 /* Interrupts we are waiting for. */
477 struct cadence_nand_irq_status irq_mask
;
478 struct completion complete
;
479 /* Protect irq_mask and irq_status. */
482 int ecc_strengths
[BCH_MAX_NUM_CORR_CAPS
];
483 struct nand_ecc_step_info ecc_stepinfos
[BCH_MAX_NUM_SECTOR_SIZES
];
484 struct nand_ecc_caps ecc_caps
;
488 struct dma_chan
*dmac
;
492 * Estimated Board delay. The value includes the total
493 * round trip delay for the signals and is used for deciding on values
494 * associated with data read capture.
498 struct nand_chip
*selected_chip
;
500 unsigned long assigned_cs
;
501 struct list_head chips
;
504 struct cdns_nand_chip
{
505 struct cadence_nand_timings timings
;
506 struct nand_chip chip
;
508 struct list_head node
;
511 * part of oob area of NAND flash memory page.
512 * This part is available for user to read or write.
/* Sector size. There are a few sectors per mtd->writesize. */
522 /* Number of bytes reserved for BBM. */
524 /* ECC strength index. */
531 int (*calc_ecc_bytes
)(int step_size
, int strength
);
static inline struct cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip)
{
	return container_of(chip, struct cdns_nand_chip, chip);
}
static inline struct cdns_nand_ctrl *
to_cdns_nand_ctrl(struct nand_controller *controller)
{
	return container_of(controller, struct cdns_nand_ctrl, controller);
}
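/*
 * Descriptive note (added): a buffer qualifies for direct DMA only when it
 * is non-NULL, virtually addressable, aligned to the controller's DMA data
 * width, and its length is a multiple of DMA_DATA_SIZE_ALIGN.
 */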
static bool cadence_nand_dma_buf_ok(struct cdns_nand_ctrl *cdns_ctrl,
				    const void *buf, u32 buf_len)
{
	u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;

	return buf && virt_addr_valid(buf) &&
	       likely(IS_ALIGNED((uintptr_t)buf, data_dma_width)) &&
	       likely(IS_ALIGNED(buf_len, DMA_DATA_SIZE_ALIGN));
}
558 static int cadence_nand_wait_for_value(struct cdns_nand_ctrl
*cdns_ctrl
,
559 u32 reg_offset
, u32 timeout_us
,
560 u32 mask
, bool is_clear
)
565 ret
= readl_relaxed_poll_timeout(cdns_ctrl
->reg
+ reg_offset
,
566 val
, !(val
& mask
) == is_clear
,
570 dev_err(cdns_ctrl
->dev
,
571 "Timeout while waiting for reg %x with mask %x is clear %d\n",
572 reg_offset
, mask
, is_clear
);
578 static int cadence_nand_set_ecc_enable(struct cdns_nand_ctrl
*cdns_ctrl
,
583 if (cadence_nand_wait_for_value(cdns_ctrl
, CTRL_STATUS
,
585 CTRL_STATUS_CTRL_BUSY
, true))
588 reg
= readl_relaxed(cdns_ctrl
->reg
+ ECC_CONFIG_0
);
591 reg
|= ECC_CONFIG_0_ECC_EN
;
593 reg
&= ~ECC_CONFIG_0_ECC_EN
;
595 writel_relaxed(reg
, cdns_ctrl
->reg
+ ECC_CONFIG_0
);
600 static void cadence_nand_set_ecc_strength(struct cdns_nand_ctrl
*cdns_ctrl
,
605 if (cdns_ctrl
->curr_corr_str_idx
== corr_str_idx
)
608 reg
= readl_relaxed(cdns_ctrl
->reg
+ ECC_CONFIG_0
);
609 reg
&= ~ECC_CONFIG_0_CORR_STR
;
610 reg
|= FIELD_PREP(ECC_CONFIG_0_CORR_STR
, corr_str_idx
);
611 writel_relaxed(reg
, cdns_ctrl
->reg
+ ECC_CONFIG_0
);
613 cdns_ctrl
->curr_corr_str_idx
= corr_str_idx
;
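/*
 * Descriptive note (added): the helper below maps a requested ECC strength
 * to the index of the matching entry in ecc_strengths[], i.e. the value
 * that gets programmed into the ECC_CONFIG_0_CORR_STR field.
 */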
616 static int cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl
*cdns_ctrl
,
619 int i
, corr_str_idx
= -1;
621 for (i
= 0; i
< BCH_MAX_NUM_CORR_CAPS
; i
++) {
622 if (cdns_ctrl
->ecc_strengths
[i
] == strength
) {
631 static int cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl
*cdns_ctrl
,
636 if (cadence_nand_wait_for_value(cdns_ctrl
, CTRL_STATUS
,
638 CTRL_STATUS_CTRL_BUSY
, true))
641 reg
= readl_relaxed(cdns_ctrl
->reg
+ SKIP_BYTES_CONF
);
642 reg
&= ~SKIP_BYTES_MARKER_VALUE
;
643 reg
|= FIELD_PREP(SKIP_BYTES_MARKER_VALUE
,
646 writel_relaxed(reg
, cdns_ctrl
->reg
+ SKIP_BYTES_CONF
);
651 static int cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl
*cdns_ctrl
,
656 u32 reg
, skip_bytes_offset
;
658 if (cadence_nand_wait_for_value(cdns_ctrl
, CTRL_STATUS
,
660 CTRL_STATUS_CTRL_BUSY
, true))
668 reg
= readl_relaxed(cdns_ctrl
->reg
+ SKIP_BYTES_CONF
);
669 reg
&= ~SKIP_BYTES_NUM_OF_BYTES
;
670 reg
|= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES
,
672 skip_bytes_offset
= FIELD_PREP(SKIP_BYTES_OFFSET_VALUE
,
675 writel_relaxed(reg
, cdns_ctrl
->reg
+ SKIP_BYTES_CONF
);
676 writel_relaxed(skip_bytes_offset
, cdns_ctrl
->reg
+ SKIP_BYTES_OFFSET
);
/* Function enables/disables hardware detection of erased data. */
682 static void cadence_nand_set_erase_detection(struct cdns_nand_ctrl
*cdns_ctrl
,
684 u8 bitflips_threshold
)
688 reg
= readl_relaxed(cdns_ctrl
->reg
+ ECC_CONFIG_0
);
691 reg
|= ECC_CONFIG_0_ERASE_DET_EN
;
693 reg
&= ~ECC_CONFIG_0_ERASE_DET_EN
;
695 writel_relaxed(reg
, cdns_ctrl
->reg
+ ECC_CONFIG_0
);
696 writel_relaxed(bitflips_threshold
, cdns_ctrl
->reg
+ ECC_CONFIG_1
);
699 static int cadence_nand_set_access_width16(struct cdns_nand_ctrl
*cdns_ctrl
,
704 if (cadence_nand_wait_for_value(cdns_ctrl
, CTRL_STATUS
,
706 CTRL_STATUS_CTRL_BUSY
, true))
709 reg
= readl_relaxed(cdns_ctrl
->reg
+ COMMON_SET
);
712 reg
&= ~COMMON_SET_DEVICE_16BIT
;
714 reg
|= COMMON_SET_DEVICE_16BIT
;
715 writel_relaxed(reg
, cdns_ctrl
->reg
+ COMMON_SET
);
721 cadence_nand_clear_interrupt(struct cdns_nand_ctrl
*cdns_ctrl
,
722 struct cadence_nand_irq_status
*irq_status
)
724 writel_relaxed(irq_status
->status
, cdns_ctrl
->reg
+ INTR_STATUS
);
725 writel_relaxed(irq_status
->trd_status
,
726 cdns_ctrl
->reg
+ TRD_COMP_INT_STATUS
);
727 writel_relaxed(irq_status
->trd_error
,
728 cdns_ctrl
->reg
+ TRD_ERR_INT_STATUS
);
732 cadence_nand_read_int_status(struct cdns_nand_ctrl
*cdns_ctrl
,
733 struct cadence_nand_irq_status
*irq_status
)
735 irq_status
->status
= readl_relaxed(cdns_ctrl
->reg
+ INTR_STATUS
);
736 irq_status
->trd_status
= readl_relaxed(cdns_ctrl
->reg
737 + TRD_COMP_INT_STATUS
);
738 irq_status
->trd_error
= readl_relaxed(cdns_ctrl
->reg
739 + TRD_ERR_INT_STATUS
);
742 static u32
irq_detected(struct cdns_nand_ctrl
*cdns_ctrl
,
743 struct cadence_nand_irq_status
*irq_status
)
745 cadence_nand_read_int_status(cdns_ctrl
, irq_status
);
747 return irq_status
->status
|| irq_status
->trd_status
||
748 irq_status
->trd_error
;
751 static void cadence_nand_reset_irq(struct cdns_nand_ctrl
*cdns_ctrl
)
755 spin_lock_irqsave(&cdns_ctrl
->irq_lock
, flags
);
756 memset(&cdns_ctrl
->irq_status
, 0, sizeof(cdns_ctrl
->irq_status
));
757 memset(&cdns_ctrl
->irq_mask
, 0, sizeof(cdns_ctrl
->irq_mask
));
758 spin_unlock_irqrestore(&cdns_ctrl
->irq_lock
, flags
);
762 * This is the interrupt service routine. It handles all interrupts
763 * sent to this device.
765 static irqreturn_t
cadence_nand_isr(int irq
, void *dev_id
)
767 struct cdns_nand_ctrl
*cdns_ctrl
= dev_id
;
768 struct cadence_nand_irq_status irq_status
;
769 irqreturn_t result
= IRQ_NONE
;
771 spin_lock(&cdns_ctrl
->irq_lock
);
773 if (irq_detected(cdns_ctrl
, &irq_status
)) {
774 /* Handle interrupt. */
775 /* First acknowledge it. */
776 cadence_nand_clear_interrupt(cdns_ctrl
, &irq_status
);
777 /* Status in the device context for someone to read. */
778 cdns_ctrl
->irq_status
.status
|= irq_status
.status
;
779 cdns_ctrl
->irq_status
.trd_status
|= irq_status
.trd_status
;
780 cdns_ctrl
->irq_status
.trd_error
|= irq_status
.trd_error
;
781 /* Notify anyone who cares that it happened. */
782 complete(&cdns_ctrl
->complete
);
783 /* Tell the OS that we've handled this. */
784 result
= IRQ_HANDLED
;
786 spin_unlock(&cdns_ctrl
->irq_lock
);
791 static void cadence_nand_set_irq_mask(struct cdns_nand_ctrl
*cdns_ctrl
,
792 struct cadence_nand_irq_status
*irq_mask
)
794 writel_relaxed(INTR_ENABLE_INTR_EN
| irq_mask
->status
,
795 cdns_ctrl
->reg
+ INTR_ENABLE
);
797 writel_relaxed(irq_mask
->trd_error
,
798 cdns_ctrl
->reg
+ TRD_ERR_INT_STATUS_EN
);
802 cadence_nand_wait_for_irq(struct cdns_nand_ctrl
*cdns_ctrl
,
803 struct cadence_nand_irq_status
*irq_mask
,
804 struct cadence_nand_irq_status
*irq_status
)
806 unsigned long timeout
= msecs_to_jiffies(10000);
807 unsigned long time_left
;
809 time_left
= wait_for_completion_timeout(&cdns_ctrl
->complete
,
812 *irq_status
= cdns_ctrl
->irq_status
;
813 if (time_left
== 0) {
815 dev_err(cdns_ctrl
->dev
, "timeout occurred:\n");
816 dev_err(cdns_ctrl
->dev
, "\tstatus = 0x%x, mask = 0x%x\n",
817 irq_status
->status
, irq_mask
->status
);
818 dev_err(cdns_ctrl
->dev
,
819 "\ttrd_status = 0x%x, trd_status mask = 0x%x\n",
820 irq_status
->trd_status
, irq_mask
->trd_status
);
821 dev_err(cdns_ctrl
->dev
,
822 "\t trd_error = 0x%x, trd_error mask = 0x%x\n",
823 irq_status
->trd_error
, irq_mask
->trd_error
);
827 /* Execute generic command on NAND controller. */
828 static int cadence_nand_generic_cmd_send(struct cdns_nand_ctrl
*cdns_ctrl
,
832 u32 mini_ctrl_cmd_l
, mini_ctrl_cmd_h
, reg
;
834 mini_ctrl_cmd
|= FIELD_PREP(GCMD_LAY_CS
, chip_nr
);
835 mini_ctrl_cmd_l
= mini_ctrl_cmd
& 0xFFFFFFFF;
836 mini_ctrl_cmd_h
= mini_ctrl_cmd
>> 32;
838 if (cadence_nand_wait_for_value(cdns_ctrl
, CTRL_STATUS
,
840 CTRL_STATUS_CTRL_BUSY
, true))
843 cadence_nand_reset_irq(cdns_ctrl
);
845 writel_relaxed(mini_ctrl_cmd_l
, cdns_ctrl
->reg
+ CMD_REG2
);
846 writel_relaxed(mini_ctrl_cmd_h
, cdns_ctrl
->reg
+ CMD_REG3
);
848 /* Select generic command. */
849 reg
= FIELD_PREP(CMD_REG0_CT
, CMD_REG0_CT_GEN
);
851 reg
|= FIELD_PREP(CMD_REG0_TN
, 0);
854 writel_relaxed(reg
, cdns_ctrl
->reg
+ CMD_REG0
);
859 /* Wait for data on slave DMA interface. */
860 static int cadence_nand_wait_on_sdma(struct cdns_nand_ctrl
*cdns_ctrl
,
864 struct cadence_nand_irq_status irq_mask
, irq_status
;
866 irq_mask
.trd_status
= 0;
867 irq_mask
.trd_error
= 0;
868 irq_mask
.status
= INTR_STATUS_SDMA_TRIGG
869 | INTR_STATUS_SDMA_ERR
870 | INTR_STATUS_UNSUPP_CMD
;
872 cadence_nand_set_irq_mask(cdns_ctrl
, &irq_mask
);
873 cadence_nand_wait_for_irq(cdns_ctrl
, &irq_mask
, &irq_status
);
874 if (irq_status
.status
== 0) {
875 dev_err(cdns_ctrl
->dev
, "Timeout while waiting for SDMA\n");
879 if (irq_status
.status
& INTR_STATUS_SDMA_TRIGG
) {
880 *out_sdma_size
= readl_relaxed(cdns_ctrl
->reg
+ SDMA_SIZE
);
881 *out_sdma_trd
= readl_relaxed(cdns_ctrl
->reg
+ SDMA_TRD_NUM
);
883 FIELD_GET(SDMA_TRD_NUM_SDMA_TRD
, *out_sdma_trd
);
885 dev_err(cdns_ctrl
->dev
, "SDMA error - irq_status %x\n",
893 static void cadence_nand_get_caps(struct cdns_nand_ctrl
*cdns_ctrl
)
897 reg
= readl_relaxed(cdns_ctrl
->reg
+ CTRL_FEATURES
);
899 cdns_ctrl
->caps2
.max_banks
= 1 << FIELD_GET(CTRL_FEATURES_N_BANKS
, reg
);
901 if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64
, reg
))
902 cdns_ctrl
->caps2
.data_dma_width
= 8;
904 cdns_ctrl
->caps2
.data_dma_width
= 4;
906 if (reg
& CTRL_FEATURES_CONTROL_DATA
)
907 cdns_ctrl
->caps2
.data_control_supp
= true;
909 if (reg
& (CTRL_FEATURES_NVDDR_2_3
910 | CTRL_FEATURES_NVDDR
))
911 cdns_ctrl
->caps2
.is_phy_type_dll
= true;
914 /* Prepare CDMA descriptor. */
916 cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl
*cdns_ctrl
,
917 char nf_mem
, u32 flash_ptr
, dma_addr_t mem_ptr
,
918 dma_addr_t ctrl_data_ptr
, u16 ctype
)
920 struct cadence_nand_cdma_desc
*cdma_desc
= cdns_ctrl
->cdma_desc
;
922 memset(cdma_desc
, 0, sizeof(struct cadence_nand_cdma_desc
));
924 /* Set fields for one descriptor. */
925 cdma_desc
->flash_pointer
= flash_ptr
;
926 if (cdns_ctrl
->ctrl_rev
>= 13)
927 cdma_desc
->bank
= nf_mem
;
929 cdma_desc
->flash_pointer
|= (nf_mem
<< CDMA_CFPTR_MEM_SHIFT
);
931 cdma_desc
->command_flags
|= CDMA_CF_DMA_MASTER
;
932 cdma_desc
->command_flags
|= CDMA_CF_INT
;
934 cdma_desc
->memory_pointer
= mem_ptr
;
935 cdma_desc
->status
= 0;
936 cdma_desc
->sync_flag_pointer
= 0;
937 cdma_desc
->sync_arguments
= 0;
939 cdma_desc
->command_type
= ctype
;
940 cdma_desc
->ctrl_data_ptr
= ctrl_data_ptr
;
943 static u8
cadence_nand_check_desc_error(struct cdns_nand_ctrl
*cdns_ctrl
,
946 if (desc_status
& CDMA_CS_ERP
)
949 if (desc_status
& CDMA_CS_UNCE
)
950 return STAT_ECC_UNCORR
;
952 if (desc_status
& CDMA_CS_ERR
) {
953 dev_err(cdns_ctrl
->dev
, ":CDMA desc error flag detected.\n");
957 if (FIELD_GET(CDMA_CS_MAXERR
, desc_status
))
958 return STAT_ECC_CORR
;
963 static int cadence_nand_cdma_finish(struct cdns_nand_ctrl
*cdns_ctrl
)
965 struct cadence_nand_cdma_desc
*desc_ptr
= cdns_ctrl
->cdma_desc
;
966 u8 status
= STAT_BUSY
;
968 if (desc_ptr
->status
& CDMA_CS_FAIL
) {
969 status
= cadence_nand_check_desc_error(cdns_ctrl
,
971 dev_err(cdns_ctrl
->dev
, ":CDMA error %x\n", desc_ptr
->status
);
972 } else if (desc_ptr
->status
& CDMA_CS_COMP
) {
973 /* Descriptor finished with no errors. */
974 if (desc_ptr
->command_flags
& CDMA_CF_CONT
) {
975 dev_info(cdns_ctrl
->dev
, "DMA unsupported flag is set");
976 status
= STAT_UNKNOWN
;
978 /* Last descriptor. */
986 static int cadence_nand_cdma_send(struct cdns_nand_ctrl
*cdns_ctrl
,
992 /* Wait for thread ready. */
993 status
= cadence_nand_wait_for_value(cdns_ctrl
, TRD_STATUS
,
999 cadence_nand_reset_irq(cdns_ctrl
);
1001 writel_relaxed((u32
)cdns_ctrl
->dma_cdma_desc
,
1002 cdns_ctrl
->reg
+ CMD_REG2
);
1003 writel_relaxed(0, cdns_ctrl
->reg
+ CMD_REG3
);
1005 /* Select CDMA mode. */
1006 reg
= FIELD_PREP(CMD_REG0_CT
, CMD_REG0_CT_CDMA
);
1007 /* Thread number. */
1008 reg
|= FIELD_PREP(CMD_REG0_TN
, thread
);
1009 /* Issue command. */
1010 writel_relaxed(reg
, cdns_ctrl
->reg
+ CMD_REG0
);
1015 /* Send SDMA command and wait for finish. */
1017 cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl
*cdns_ctrl
,
1020 struct cadence_nand_irq_status irq_mask
, irq_status
= {0};
1023 irq_mask
.trd_status
= BIT(thread
);
1024 irq_mask
.trd_error
= BIT(thread
);
1025 irq_mask
.status
= INTR_STATUS_CDMA_TERR
;
1027 cadence_nand_set_irq_mask(cdns_ctrl
, &irq_mask
);
1029 status
= cadence_nand_cdma_send(cdns_ctrl
, thread
);
1033 cadence_nand_wait_for_irq(cdns_ctrl
, &irq_mask
, &irq_status
);
1035 if (irq_status
.status
== 0 && irq_status
.trd_status
== 0 &&
1036 irq_status
.trd_error
== 0) {
1037 dev_err(cdns_ctrl
->dev
, "CDMA command timeout\n");
1040 if (irq_status
.status
& irq_mask
.status
) {
1041 dev_err(cdns_ctrl
->dev
, "CDMA command failed\n");
 * ECC size depends on the configured ECC strength and on the maximum
 * supported ECC step size.
1052 static int cadence_nand_calc_ecc_bytes(int max_step_size
, int strength
)
1054 int nbytes
= DIV_ROUND_UP(fls(8 * max_step_size
) * strength
, 8);
1056 return ALIGN(nbytes
, 2);
1059 #define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \
1061 cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \
1064 return cadence_nand_calc_ecc_bytes(max_step_size, strength);\
1067 CADENCE_NAND_CALC_ECC_BYTES(256)
1068 CADENCE_NAND_CALC_ECC_BYTES(512)
1069 CADENCE_NAND_CALC_ECC_BYTES(1024)
1070 CADENCE_NAND_CALC_ECC_BYTES(2048)
1071 CADENCE_NAND_CALC_ECC_BYTES(4096)
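/*
 * Worked example (illustrative note, added): for a 1024-byte ECC step and
 * strength 16, fls(8 * 1024) = 14 bits are needed per correction, so
 * DIV_ROUND_UP(14 * 16, 8) = 28 ECC bytes, which is already 2-byte aligned.
 */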
1073 /* Function reads BCH capabilities. */
1074 static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl
*cdns_ctrl
)
1076 struct nand_ecc_caps
*ecc_caps
= &cdns_ctrl
->ecc_caps
;
1077 int max_step_size
= 0, nstrengths
, i
;
1080 reg
= readl_relaxed(cdns_ctrl
->reg
+ BCH_CFG_0
);
1081 cdns_ctrl
->ecc_strengths
[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0
, reg
);
1082 cdns_ctrl
->ecc_strengths
[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1
, reg
);
1083 cdns_ctrl
->ecc_strengths
[2] = FIELD_GET(BCH_CFG_0_CORR_CAP_2
, reg
);
1084 cdns_ctrl
->ecc_strengths
[3] = FIELD_GET(BCH_CFG_0_CORR_CAP_3
, reg
);
1086 reg
= readl_relaxed(cdns_ctrl
->reg
+ BCH_CFG_1
);
1087 cdns_ctrl
->ecc_strengths
[4] = FIELD_GET(BCH_CFG_1_CORR_CAP_4
, reg
);
1088 cdns_ctrl
->ecc_strengths
[5] = FIELD_GET(BCH_CFG_1_CORR_CAP_5
, reg
);
1089 cdns_ctrl
->ecc_strengths
[6] = FIELD_GET(BCH_CFG_1_CORR_CAP_6
, reg
);
1090 cdns_ctrl
->ecc_strengths
[7] = FIELD_GET(BCH_CFG_1_CORR_CAP_7
, reg
);
1092 reg
= readl_relaxed(cdns_ctrl
->reg
+ BCH_CFG_2
);
1093 cdns_ctrl
->ecc_stepinfos
[0].stepsize
=
1094 FIELD_GET(BCH_CFG_2_SECT_0
, reg
);
1096 cdns_ctrl
->ecc_stepinfos
[1].stepsize
=
1097 FIELD_GET(BCH_CFG_2_SECT_1
, reg
);
1100 for (i
= 0; i
< BCH_MAX_NUM_CORR_CAPS
; i
++) {
1101 if (cdns_ctrl
->ecc_strengths
[i
] != 0)
1105 ecc_caps
->nstepinfos
= 0;
1106 for (i
= 0; i
< BCH_MAX_NUM_SECTOR_SIZES
; i
++) {
1107 /* ECC strengths are common for all step infos. */
1108 cdns_ctrl
->ecc_stepinfos
[i
].nstrengths
= nstrengths
;
1109 cdns_ctrl
->ecc_stepinfos
[i
].strengths
=
1110 cdns_ctrl
->ecc_strengths
;
1112 if (cdns_ctrl
->ecc_stepinfos
[i
].stepsize
!= 0)
1113 ecc_caps
->nstepinfos
++;
1115 if (cdns_ctrl
->ecc_stepinfos
[i
].stepsize
> max_step_size
)
1116 max_step_size
= cdns_ctrl
->ecc_stepinfos
[i
].stepsize
;
1118 ecc_caps
->stepinfos
= &cdns_ctrl
->ecc_stepinfos
[0];
1120 switch (max_step_size
) {
1122 ecc_caps
->calc_ecc_bytes
= &cadence_nand_calc_ecc_bytes_256
;
1125 ecc_caps
->calc_ecc_bytes
= &cadence_nand_calc_ecc_bytes_512
;
1128 ecc_caps
->calc_ecc_bytes
= &cadence_nand_calc_ecc_bytes_1024
;
1131 ecc_caps
->calc_ecc_bytes
= &cadence_nand_calc_ecc_bytes_2048
;
1134 ecc_caps
->calc_ecc_bytes
= &cadence_nand_calc_ecc_bytes_4096
;
1137 dev_err(cdns_ctrl
->dev
,
1138 "Unsupported sector size(ecc step size) %d\n",
1146 /* Hardware initialization. */
1147 static int cadence_nand_hw_init(struct cdns_nand_ctrl
*cdns_ctrl
)
1152 status
= cadence_nand_wait_for_value(cdns_ctrl
, CTRL_STATUS
,
1154 CTRL_STATUS_INIT_COMP
, false);
1158 reg
= readl_relaxed(cdns_ctrl
->reg
+ CTRL_VERSION
);
1159 cdns_ctrl
->ctrl_rev
= FIELD_GET(CTRL_VERSION_REV
, reg
);
1161 dev_info(cdns_ctrl
->dev
,
1162 "%s: cadence nand controller version reg %x\n",
1165 /* Disable cache and multiplane. */
1166 writel_relaxed(0, cdns_ctrl
->reg
+ MULTIPLANE_CFG
);
1167 writel_relaxed(0, cdns_ctrl
->reg
+ CACHE_CFG
);
1169 /* Clear all interrupts. */
1170 writel_relaxed(0xFFFFFFFF, cdns_ctrl
->reg
+ INTR_STATUS
);
1172 cadence_nand_get_caps(cdns_ctrl
);
1173 cadence_nand_read_bch_caps(cdns_ctrl
);
 * Set I/O width access to 8 bit.
 * This is because during SW device discovery the width access
 * is expected to be 8-bit.
1180 status
= cadence_nand_set_access_width16(cdns_ctrl
, false);
1185 #define TT_MAIN_OOB_AREAS 2
#define TT_RAW_PAGE 3
#define TT_BBM 4
#define TT_MAIN_OOB_AREA_EXT 5
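/*
 * Descriptive note (added): the TT_* transfer types select which parts of a
 * page are moved in one CDMA operation - main data plus available OOB, the
 * raw page including ECC bytes, only the bad block marker area, or main data
 * with the OOB passed through the control-data channel. See
 * cadence_nand_prepare_data_size() below.
 */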
1190 /* Prepare size of data to transfer. */
1192 cadence_nand_prepare_data_size(struct nand_chip
*chip
,
1195 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1196 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
1197 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1198 u32 sec_size
= 0, offset
= 0, sec_cnt
= 1;
1199 u32 last_sec_size
= cdns_chip
->sector_size
;
1200 u32 data_ctrl_size
= 0;
1203 if (cdns_ctrl
->curr_trans_type
== transfer_type
)
1206 switch (transfer_type
) {
1207 case TT_MAIN_OOB_AREA_EXT
:
1208 sec_cnt
= cdns_chip
->sector_count
;
1209 sec_size
= cdns_chip
->sector_size
;
1210 data_ctrl_size
= cdns_chip
->avail_oob_size
;
1212 case TT_MAIN_OOB_AREAS
:
1213 sec_cnt
= cdns_chip
->sector_count
;
1214 last_sec_size
= cdns_chip
->sector_size
1215 + cdns_chip
->avail_oob_size
;
1216 sec_size
= cdns_chip
->sector_size
;
1219 last_sec_size
= mtd
->writesize
+ mtd
->oobsize
;
1222 offset
= mtd
->writesize
+ cdns_chip
->bbm_offs
;
1228 reg
|= FIELD_PREP(TRAN_CFG_0_OFFSET
, offset
);
1229 reg
|= FIELD_PREP(TRAN_CFG_0_SEC_CNT
, sec_cnt
);
1230 writel_relaxed(reg
, cdns_ctrl
->reg
+ TRAN_CFG_0
);
1233 reg
|= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE
, last_sec_size
);
1234 reg
|= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE
, sec_size
);
1235 writel_relaxed(reg
, cdns_ctrl
->reg
+ TRAN_CFG_1
);
1237 if (cdns_ctrl
->caps2
.data_control_supp
) {
1238 reg
= readl_relaxed(cdns_ctrl
->reg
+ CONTROL_DATA_CTRL
);
1239 reg
&= ~CONTROL_DATA_CTRL_SIZE
;
1240 reg
|= FIELD_PREP(CONTROL_DATA_CTRL_SIZE
, data_ctrl_size
);
1241 writel_relaxed(reg
, cdns_ctrl
->reg
+ CONTROL_DATA_CTRL
);
1244 cdns_ctrl
->curr_trans_type
= transfer_type
;
1248 cadence_nand_cdma_transfer(struct cdns_nand_ctrl
*cdns_ctrl
, u8 chip_nr
,
1249 int page
, void *buf
, void *ctrl_dat
, u32 buf_size
,
1250 u32 ctrl_dat_size
, enum dma_data_direction dir
,
1253 dma_addr_t dma_buf
, dma_ctrl_dat
= 0;
1254 u8 thread_nr
= chip_nr
;
1258 if (dir
== DMA_FROM_DEVICE
)
1263 cadence_nand_set_ecc_enable(cdns_ctrl
, with_ecc
);
1265 dma_buf
= dma_map_single(cdns_ctrl
->dev
, buf
, buf_size
, dir
);
1266 if (dma_mapping_error(cdns_ctrl
->dev
, dma_buf
)) {
1267 dev_err(cdns_ctrl
->dev
, "Failed to map DMA buffer\n");
1271 if (ctrl_dat
&& ctrl_dat_size
) {
1272 dma_ctrl_dat
= dma_map_single(cdns_ctrl
->dev
, ctrl_dat
,
1273 ctrl_dat_size
, dir
);
1274 if (dma_mapping_error(cdns_ctrl
->dev
, dma_ctrl_dat
)) {
1275 dma_unmap_single(cdns_ctrl
->dev
, dma_buf
,
1277 dev_err(cdns_ctrl
->dev
, "Failed to map DMA buffer\n");
1282 cadence_nand_cdma_desc_prepare(cdns_ctrl
, chip_nr
, page
,
1283 dma_buf
, dma_ctrl_dat
, ctype
);
1285 status
= cadence_nand_cdma_send_and_wait(cdns_ctrl
, thread_nr
);
1287 dma_unmap_single(cdns_ctrl
->dev
, dma_buf
,
1290 if (ctrl_dat
&& ctrl_dat_size
)
1291 dma_unmap_single(cdns_ctrl
->dev
, dma_ctrl_dat
,
1292 ctrl_dat_size
, dir
);
1296 return cadence_nand_cdma_finish(cdns_ctrl
);
1299 static void cadence_nand_set_timings(struct cdns_nand_ctrl
*cdns_ctrl
,
1300 struct cadence_nand_timings
*t
)
1302 writel_relaxed(t
->async_toggle_timings
,
1303 cdns_ctrl
->reg
+ ASYNC_TOGGLE_TIMINGS
);
1304 writel_relaxed(t
->timings0
, cdns_ctrl
->reg
+ TIMINGS0
);
1305 writel_relaxed(t
->timings1
, cdns_ctrl
->reg
+ TIMINGS1
);
1306 writel_relaxed(t
->timings2
, cdns_ctrl
->reg
+ TIMINGS2
);
1308 if (cdns_ctrl
->caps2
.is_phy_type_dll
)
1309 writel_relaxed(t
->dll_phy_ctrl
, cdns_ctrl
->reg
+ DLL_PHY_CTRL
);
1311 writel_relaxed(t
->phy_ctrl
, cdns_ctrl
->reg
+ PHY_CTRL
);
1313 if (cdns_ctrl
->caps2
.is_phy_type_dll
) {
1314 writel_relaxed(0, cdns_ctrl
->reg
+ PHY_TSEL
);
1315 writel_relaxed(2, cdns_ctrl
->reg
+ PHY_DQ_TIMING
);
1316 writel_relaxed(t
->phy_dqs_timing
,
1317 cdns_ctrl
->reg
+ PHY_DQS_TIMING
);
1318 writel_relaxed(t
->phy_gate_lpbk_ctrl
,
1319 cdns_ctrl
->reg
+ PHY_GATE_LPBK_CTRL
);
1320 writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE
,
1321 cdns_ctrl
->reg
+ PHY_DLL_MASTER_CTRL
);
1322 writel_relaxed(0, cdns_ctrl
->reg
+ PHY_DLL_SLAVE_CTRL
);
1326 static int cadence_nand_select_target(struct nand_chip
*chip
)
1328 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1329 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
1331 if (chip
== cdns_ctrl
->selected_chip
)
1334 if (cadence_nand_wait_for_value(cdns_ctrl
, CTRL_STATUS
,
1336 CTRL_STATUS_CTRL_BUSY
, true))
1339 cadence_nand_set_timings(cdns_ctrl
, &cdns_chip
->timings
);
1341 cadence_nand_set_ecc_strength(cdns_ctrl
,
1342 cdns_chip
->corr_str_idx
);
1344 cadence_nand_set_erase_detection(cdns_ctrl
, true,
1345 chip
->ecc
.strength
);
1347 cdns_ctrl
->curr_trans_type
= -1;
1348 cdns_ctrl
->selected_chip
= chip
;
1353 static int cadence_nand_erase(struct nand_chip
*chip
, u32 page
)
1355 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1356 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
1358 u8 thread_nr
= cdns_chip
->cs
[chip
->cur_cs
];
1360 cadence_nand_cdma_desc_prepare(cdns_ctrl
,
1361 cdns_chip
->cs
[chip
->cur_cs
],
1364 status
= cadence_nand_cdma_send_and_wait(cdns_ctrl
, thread_nr
);
1366 dev_err(cdns_ctrl
->dev
, "erase operation failed\n");
1370 status
= cadence_nand_cdma_finish(cdns_ctrl
);
1377 static int cadence_nand_read_bbm(struct nand_chip
*chip
, int page
, u8
*buf
)
1380 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1381 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
1382 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1384 cadence_nand_prepare_data_size(chip
, TT_BBM
);
1386 cadence_nand_set_skip_bytes_conf(cdns_ctrl
, 0, 0, 0);
1389 * Read only bad block marker from offset
1390 * defined by a memory manufacturer.
1392 status
= cadence_nand_cdma_transfer(cdns_ctrl
,
1393 cdns_chip
->cs
[chip
->cur_cs
],
1394 page
, cdns_ctrl
->buf
, NULL
,
1396 0, DMA_FROM_DEVICE
, false);
1398 dev_err(cdns_ctrl
->dev
, "read BBM failed\n");
1402 memcpy(buf
+ cdns_chip
->bbm_offs
, cdns_ctrl
->buf
, cdns_chip
->bbm_len
);
1407 static int cadence_nand_write_page(struct nand_chip
*chip
,
1408 const u8
*buf
, int oob_required
,
1411 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1412 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
1413 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1415 u16 marker_val
= 0xFFFF;
1417 status
= cadence_nand_select_target(chip
);
1421 cadence_nand_set_skip_bytes_conf(cdns_ctrl
, cdns_chip
->bbm_len
,
1423 + cdns_chip
->bbm_offs
,
1427 marker_val
= *(u16
*)(chip
->oob_poi
1428 + cdns_chip
->bbm_offs
);
1430 /* Set oob data to 0xFF. */
1431 memset(cdns_ctrl
->buf
+ mtd
->writesize
, 0xFF,
1432 cdns_chip
->avail_oob_size
);
1435 cadence_nand_set_skip_marker_val(cdns_ctrl
, marker_val
);
1437 cadence_nand_prepare_data_size(chip
, TT_MAIN_OOB_AREA_EXT
);
1439 if (cadence_nand_dma_buf_ok(cdns_ctrl
, buf
, mtd
->writesize
) &&
1440 cdns_ctrl
->caps2
.data_control_supp
) {
1444 oob
= chip
->oob_poi
;
1446 oob
= cdns_ctrl
->buf
+ mtd
->writesize
;
1448 status
= cadence_nand_cdma_transfer(cdns_ctrl
,
1449 cdns_chip
->cs
[chip
->cur_cs
],
1450 page
, (void *)buf
, oob
,
1452 cdns_chip
->avail_oob_size
,
1453 DMA_TO_DEVICE
, true);
1455 dev_err(cdns_ctrl
->dev
, "write page failed\n");
1463 /* Transfer the data to the oob area. */
1464 memcpy(cdns_ctrl
->buf
+ mtd
->writesize
, chip
->oob_poi
,
1465 cdns_chip
->avail_oob_size
);
1468 memcpy(cdns_ctrl
->buf
, buf
, mtd
->writesize
);
1470 cadence_nand_prepare_data_size(chip
, TT_MAIN_OOB_AREAS
);
1472 return cadence_nand_cdma_transfer(cdns_ctrl
,
1473 cdns_chip
->cs
[chip
->cur_cs
],
1474 page
, cdns_ctrl
->buf
, NULL
,
1476 + cdns_chip
->avail_oob_size
,
1477 0, DMA_TO_DEVICE
, true);
1480 static int cadence_nand_write_oob(struct nand_chip
*chip
, int page
)
1482 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1483 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1485 memset(cdns_ctrl
->buf
, 0xFF, mtd
->writesize
);
1487 return cadence_nand_write_page(chip
, cdns_ctrl
->buf
, 1, page
);
1490 static int cadence_nand_write_page_raw(struct nand_chip
*chip
,
1491 const u8
*buf
, int oob_required
,
1494 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1495 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
1496 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1497 int writesize
= mtd
->writesize
;
1498 int oobsize
= mtd
->oobsize
;
1499 int ecc_steps
= chip
->ecc
.steps
;
1500 int ecc_size
= chip
->ecc
.size
;
1501 int ecc_bytes
= chip
->ecc
.bytes
;
1502 void *tmp_buf
= cdns_ctrl
->buf
;
1503 int oob_skip
= cdns_chip
->bbm_len
;
1504 size_t size
= writesize
+ oobsize
;
1508 status
= cadence_nand_select_target(chip
);
 * Fill the buffer with 0xff first, except for a full page transfer.
 * This simplifies the logic.
1516 if (!buf
|| !oob_required
)
1517 memset(tmp_buf
, 0xff, size
);
1519 cadence_nand_set_skip_bytes_conf(cdns_ctrl
, 0, 0, 0);
1521 /* Arrange the buffer for syndrome payload/ecc layout. */
1523 for (i
= 0; i
< ecc_steps
; i
++) {
1524 pos
= i
* (ecc_size
+ ecc_bytes
);
1527 if (pos
>= writesize
)
1529 else if (pos
+ len
> writesize
)
1530 len
= writesize
- pos
;
1532 memcpy(tmp_buf
+ pos
, buf
, len
);
1534 if (len
< ecc_size
) {
1535 len
= ecc_size
- len
;
1536 memcpy(tmp_buf
+ writesize
+ oob_skip
, buf
,
1544 const u8
*oob
= chip
->oob_poi
;
1545 u32 oob_data_offset
= (cdns_chip
->sector_count
- 1) *
1546 (cdns_chip
->sector_size
+ chip
->ecc
.bytes
)
1547 + cdns_chip
->sector_size
+ oob_skip
;
1549 /* BBM at the beginning of the OOB area. */
1550 memcpy(tmp_buf
+ writesize
, oob
, oob_skip
);
1553 memcpy(tmp_buf
+ oob_data_offset
, oob
,
1554 cdns_chip
->avail_oob_size
);
1555 oob
+= cdns_chip
->avail_oob_size
;
1558 for (i
= 0; i
< ecc_steps
; i
++) {
1559 pos
= ecc_size
+ i
* (ecc_size
+ ecc_bytes
);
1560 if (i
== (ecc_steps
- 1))
1561 pos
+= cdns_chip
->avail_oob_size
;
1565 if (pos
>= writesize
)
1567 else if (pos
+ len
> writesize
)
1568 len
= writesize
- pos
;
1570 memcpy(tmp_buf
+ pos
, oob
, len
);
1572 if (len
< ecc_bytes
) {
1573 len
= ecc_bytes
- len
;
1574 memcpy(tmp_buf
+ writesize
+ oob_skip
, oob
,
1581 cadence_nand_prepare_data_size(chip
, TT_RAW_PAGE
);
1583 return cadence_nand_cdma_transfer(cdns_ctrl
,
1584 cdns_chip
->cs
[chip
->cur_cs
],
1585 page
, cdns_ctrl
->buf
, NULL
,
1588 0, DMA_TO_DEVICE
, false);
1591 static int cadence_nand_write_oob_raw(struct nand_chip
*chip
,
1594 return cadence_nand_write_page_raw(chip
, NULL
, true, page
);
1597 static int cadence_nand_read_page(struct nand_chip
*chip
,
1598 u8
*buf
, int oob_required
, int page
)
1600 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1601 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
1602 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1604 int ecc_err_count
= 0;
1606 status
= cadence_nand_select_target(chip
);
1610 cadence_nand_set_skip_bytes_conf(cdns_ctrl
, cdns_chip
->bbm_len
,
1612 + cdns_chip
->bbm_offs
, 1);
 * If the data buffer can be accessed by DMA and the data_control feature
 * is supported, then transfer the data and OOB directly.
1618 if (cadence_nand_dma_buf_ok(cdns_ctrl
, buf
, mtd
->writesize
) &&
1619 cdns_ctrl
->caps2
.data_control_supp
) {
1623 oob
= chip
->oob_poi
;
1625 oob
= cdns_ctrl
->buf
+ mtd
->writesize
;
1627 cadence_nand_prepare_data_size(chip
, TT_MAIN_OOB_AREA_EXT
);
1628 status
= cadence_nand_cdma_transfer(cdns_ctrl
,
1629 cdns_chip
->cs
[chip
->cur_cs
],
1632 cdns_chip
->avail_oob_size
,
1633 DMA_FROM_DEVICE
, true);
1634 /* Otherwise use bounce buffer. */
1636 cadence_nand_prepare_data_size(chip
, TT_MAIN_OOB_AREAS
);
1637 status
= cadence_nand_cdma_transfer(cdns_ctrl
,
1638 cdns_chip
->cs
[chip
->cur_cs
],
1639 page
, cdns_ctrl
->buf
,
1640 NULL
, mtd
->writesize
1641 + cdns_chip
->avail_oob_size
,
1642 0, DMA_FROM_DEVICE
, true);
1644 memcpy(buf
, cdns_ctrl
->buf
, mtd
->writesize
);
1646 memcpy(chip
->oob_poi
,
1647 cdns_ctrl
->buf
+ mtd
->writesize
,
1652 case STAT_ECC_UNCORR
:
1653 mtd
->ecc_stats
.failed
++;
1657 ecc_err_count
= FIELD_GET(CDMA_CS_MAXERR
,
1658 cdns_ctrl
->cdma_desc
->status
);
1659 mtd
->ecc_stats
.corrected
+= ecc_err_count
;
1665 dev_err(cdns_ctrl
->dev
, "read page failed\n");
1670 if (cadence_nand_read_bbm(chip
, page
, chip
->oob_poi
))
1673 return ecc_err_count
;
1676 /* Reads OOB data from the device. */
1677 static int cadence_nand_read_oob(struct nand_chip
*chip
, int page
)
1679 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1681 return cadence_nand_read_page(chip
, cdns_ctrl
->buf
, 1, page
);
1684 static int cadence_nand_read_page_raw(struct nand_chip
*chip
,
1685 u8
*buf
, int oob_required
, int page
)
1687 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1688 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
1689 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1690 int oob_skip
= cdns_chip
->bbm_len
;
1691 int writesize
= mtd
->writesize
;
1692 int ecc_steps
= chip
->ecc
.steps
;
1693 int ecc_size
= chip
->ecc
.size
;
1694 int ecc_bytes
= chip
->ecc
.bytes
;
1695 void *tmp_buf
= cdns_ctrl
->buf
;
1699 status
= cadence_nand_select_target(chip
);
1703 cadence_nand_set_skip_bytes_conf(cdns_ctrl
, 0, 0, 0);
1705 cadence_nand_prepare_data_size(chip
, TT_RAW_PAGE
);
1706 status
= cadence_nand_cdma_transfer(cdns_ctrl
,
1707 cdns_chip
->cs
[chip
->cur_cs
],
1708 page
, cdns_ctrl
->buf
, NULL
,
1711 0, DMA_FROM_DEVICE
, false);
1718 dev_err(cdns_ctrl
->dev
, "read raw page failed\n");
1722 /* Arrange the buffer for syndrome payload/ecc layout. */
1724 for (i
= 0; i
< ecc_steps
; i
++) {
1725 pos
= i
* (ecc_size
+ ecc_bytes
);
1728 if (pos
>= writesize
)
1730 else if (pos
+ len
> writesize
)
1731 len
= writesize
- pos
;
1733 memcpy(buf
, tmp_buf
+ pos
, len
);
1735 if (len
< ecc_size
) {
1736 len
= ecc_size
- len
;
1737 memcpy(buf
, tmp_buf
+ writesize
+ oob_skip
,
1745 u8
*oob
= chip
->oob_poi
;
1746 u32 oob_data_offset
= (cdns_chip
->sector_count
- 1) *
1747 (cdns_chip
->sector_size
+ chip
->ecc
.bytes
)
1748 + cdns_chip
->sector_size
+ oob_skip
;
1751 memcpy(oob
, tmp_buf
+ oob_data_offset
,
1752 cdns_chip
->avail_oob_size
);
1754 /* BBM at the beginning of the OOB area. */
1755 memcpy(oob
, tmp_buf
+ writesize
, oob_skip
);
1757 oob
+= cdns_chip
->avail_oob_size
;
1760 for (i
= 0; i
< ecc_steps
; i
++) {
1761 pos
= ecc_size
+ i
* (ecc_size
+ ecc_bytes
);
1764 if (i
== (ecc_steps
- 1))
1765 pos
+= cdns_chip
->avail_oob_size
;
1767 if (pos
>= writesize
)
1769 else if (pos
+ len
> writesize
)
1770 len
= writesize
- pos
;
1772 memcpy(oob
, tmp_buf
+ pos
, len
);
1774 if (len
< ecc_bytes
) {
1775 len
= ecc_bytes
- len
;
1776 memcpy(oob
, tmp_buf
+ writesize
+ oob_skip
,
1786 static int cadence_nand_read_oob_raw(struct nand_chip
*chip
,
1789 return cadence_nand_read_page_raw(chip
, NULL
, true, page
);
1792 static void cadence_nand_slave_dma_transfer_finished(void *data
)
1794 struct completion
*finished
= data
;
1799 static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl
*cdns_ctrl
,
1801 dma_addr_t dev_dma
, size_t len
,
1802 enum dma_data_direction dir
)
1804 DECLARE_COMPLETION_ONSTACK(finished
);
1805 struct dma_chan
*chan
;
1806 struct dma_device
*dma_dev
;
1807 dma_addr_t src_dma
, dst_dma
, buf_dma
;
1808 struct dma_async_tx_descriptor
*tx
;
1809 dma_cookie_t cookie
;
1811 chan
= cdns_ctrl
->dmac
;
1812 dma_dev
= chan
->device
;
1814 buf_dma
= dma_map_single(dma_dev
->dev
, buf
, len
, dir
);
1815 if (dma_mapping_error(dma_dev
->dev
, buf_dma
)) {
1816 dev_err(cdns_ctrl
->dev
, "Failed to map DMA buffer\n");
1820 if (dir
== DMA_FROM_DEVICE
) {
1821 src_dma
= cdns_ctrl
->io
.dma
;
1825 dst_dma
= cdns_ctrl
->io
.dma
;
1828 tx
= dmaengine_prep_dma_memcpy(cdns_ctrl
->dmac
, dst_dma
, src_dma
, len
,
1829 DMA_CTRL_ACK
| DMA_PREP_INTERRUPT
);
1831 dev_err(cdns_ctrl
->dev
, "Failed to prepare DMA memcpy\n");
1835 tx
->callback
= cadence_nand_slave_dma_transfer_finished
;
1836 tx
->callback_param
= &finished
;
1838 cookie
= dmaengine_submit(tx
);
1839 if (dma_submit_error(cookie
)) {
1840 dev_err(cdns_ctrl
->dev
, "Failed to do DMA tx_submit\n");
1844 dma_async_issue_pending(cdns_ctrl
->dmac
);
1845 wait_for_completion(&finished
);
1847 dma_unmap_single(cdns_ctrl
->dev
, buf_dma
, len
, dir
);
1852 dma_unmap_single(cdns_ctrl
->dev
, buf_dma
, len
, dir
);
1855 dev_dbg(cdns_ctrl
->dev
, "Fall back to CPU I/O\n");
1860 static int cadence_nand_read_buf(struct cdns_nand_ctrl
*cdns_ctrl
,
/* Wait until the slave DMA interface is ready for data transfer. */
1868 status
= cadence_nand_wait_on_sdma(cdns_ctrl
, &thread_nr
, &sdma_size
);
1872 if (!cdns_ctrl
->caps1
->has_dma
) {
1873 int len_in_words
= len
>> 2;
/* Read the word-aligned part of the data. */
1876 ioread32_rep(cdns_ctrl
->io
.virt
, buf
, len_in_words
);
1877 if (sdma_size
> len
) {
/* Read the rest of the data from the slave DMA interface, if any. */
1879 ioread32_rep(cdns_ctrl
->io
.virt
, cdns_ctrl
->buf
,
1880 sdma_size
/ 4 - len_in_words
);
1881 /* copy rest of data */
1882 memcpy(buf
+ (len_in_words
<< 2), cdns_ctrl
->buf
,
1883 len
- (len_in_words
<< 2));
1888 if (cadence_nand_dma_buf_ok(cdns_ctrl
, buf
, len
)) {
1889 status
= cadence_nand_slave_dma_transfer(cdns_ctrl
, buf
,
1891 len
, DMA_FROM_DEVICE
);
1895 dev_warn(cdns_ctrl
->dev
,
1896 "Slave DMA transfer failed. Try again using bounce buffer.");
1899 /* If DMA transfer is not possible or failed then use bounce buffer. */
1900 status
= cadence_nand_slave_dma_transfer(cdns_ctrl
, cdns_ctrl
->buf
,
1902 sdma_size
, DMA_FROM_DEVICE
);
1905 dev_err(cdns_ctrl
->dev
, "Slave DMA transfer failed");
1909 memcpy(buf
, cdns_ctrl
->buf
, len
);
1914 static int cadence_nand_write_buf(struct cdns_nand_ctrl
*cdns_ctrl
,
1915 const u8
*buf
, int len
)
/* Wait until the slave DMA interface is ready for data transfer. */
1922 status
= cadence_nand_wait_on_sdma(cdns_ctrl
, &thread_nr
, &sdma_size
);
1926 if (!cdns_ctrl
->caps1
->has_dma
) {
1927 int len_in_words
= len
>> 2;
1929 iowrite32_rep(cdns_ctrl
->io
.virt
, buf
, len_in_words
);
1930 if (sdma_size
> len
) {
1931 /* copy rest of data */
1932 memcpy(cdns_ctrl
->buf
, buf
+ (len_in_words
<< 2),
1933 len
- (len_in_words
<< 2));
/* Write all data expected by the NAND controller. */
1935 iowrite32_rep(cdns_ctrl
->io
.virt
, cdns_ctrl
->buf
,
1936 sdma_size
/ 4 - len_in_words
);
1942 if (cadence_nand_dma_buf_ok(cdns_ctrl
, buf
, len
)) {
1943 status
= cadence_nand_slave_dma_transfer(cdns_ctrl
, (void *)buf
,
1945 len
, DMA_TO_DEVICE
);
1949 dev_warn(cdns_ctrl
->dev
,
1950 "Slave DMA transfer failed. Try again using bounce buffer.");
1953 /* If DMA transfer is not possible or failed then use bounce buffer. */
1954 memcpy(cdns_ctrl
->buf
, buf
, len
);
1956 status
= cadence_nand_slave_dma_transfer(cdns_ctrl
, cdns_ctrl
->buf
,
1958 sdma_size
, DMA_TO_DEVICE
);
1961 dev_err(cdns_ctrl
->dev
, "Slave DMA transfer failed");
1966 static int cadence_nand_force_byte_access(struct nand_chip
*chip
,
1969 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1973 * Callers of this function do not verify if the NAND is using a 16-bit
 * or an 8-bit bus for normal operations, so we need to take care of that
1975 * here by leaving the configuration unchanged if the NAND does not have
1976 * the NAND_BUSWIDTH_16 flag set.
1978 if (!(chip
->options
& NAND_BUSWIDTH_16
))
1981 status
= cadence_nand_set_access_width16(cdns_ctrl
, !force_8bit
);
1986 static int cadence_nand_cmd_opcode(struct nand_chip
*chip
,
1987 const struct nand_subop
*subop
)
1989 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
1990 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
1991 const struct nand_op_instr
*instr
;
1992 unsigned int op_id
= 0;
1993 u64 mini_ctrl_cmd
= 0;
1996 instr
= &subop
->instrs
[op_id
];
1998 if (instr
->delay_ns
> 0)
1999 mini_ctrl_cmd
|= GCMD_LAY_TWB
;
2001 mini_ctrl_cmd
|= FIELD_PREP(GCMD_LAY_INSTR
,
2002 GCMD_LAY_INSTR_CMD
);
2003 mini_ctrl_cmd
|= FIELD_PREP(GCMD_LAY_INPUT_CMD
,
2004 instr
->ctx
.cmd
.opcode
);
2006 ret
= cadence_nand_generic_cmd_send(cdns_ctrl
,
2007 cdns_chip
->cs
[chip
->cur_cs
],
2010 dev_err(cdns_ctrl
->dev
, "send cmd %x failed\n",
2011 instr
->ctx
.cmd
.opcode
);
2016 static int cadence_nand_cmd_address(struct nand_chip
*chip
,
2017 const struct nand_subop
*subop
)
2019 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
2020 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
2021 const struct nand_op_instr
*instr
;
2022 unsigned int op_id
= 0;
2023 u64 mini_ctrl_cmd
= 0;
2024 unsigned int offset
, naddrs
;
2030 instr
= &subop
->instrs
[op_id
];
2032 if (instr
->delay_ns
> 0)
2033 mini_ctrl_cmd
|= GCMD_LAY_TWB
;
2035 mini_ctrl_cmd
|= FIELD_PREP(GCMD_LAY_INSTR
,
2036 GCMD_LAY_INSTR_ADDR
);
2038 offset
= nand_subop_get_addr_start_off(subop
, op_id
);
2039 naddrs
= nand_subop_get_num_addr_cyc(subop
, op_id
);
2040 addrs
= &instr
->ctx
.addr
.addrs
[offset
];
2042 for (i
= 0; i
< naddrs
; i
++)
2043 address
|= (u64
)addrs
[i
] << (8 * i
);
2045 mini_ctrl_cmd
|= FIELD_PREP(GCMD_LAY_INPUT_ADDR
,
2047 mini_ctrl_cmd
|= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE
,
2050 ret
= cadence_nand_generic_cmd_send(cdns_ctrl
,
2051 cdns_chip
->cs
[chip
->cur_cs
],
2054 dev_err(cdns_ctrl
->dev
, "send address %llx failed\n", address
);
2059 static int cadence_nand_cmd_erase(struct nand_chip
*chip
,
2060 const struct nand_subop
*subop
)
2064 if (subop
->instrs
[0].ctx
.cmd
.opcode
== NAND_CMD_ERASE1
) {
2066 const struct nand_op_instr
*instr
= NULL
;
2067 unsigned int offset
, naddrs
;
2071 instr
= &subop
->instrs
[1];
2072 offset
= nand_subop_get_addr_start_off(subop
, 1);
2073 naddrs
= nand_subop_get_num_addr_cyc(subop
, 1);
2074 addrs
= &instr
->ctx
.addr
.addrs
[offset
];
2076 for (i
= 0; i
< naddrs
; i
++)
2077 page
|= (u32
)addrs
[i
] << (8 * i
);
2079 return cadence_nand_erase(chip
, page
);
 * If it is not an erase operation then handle the operation
 * by calling the exec_op function.
2086 for (op_id
= 0; op_id
< subop
->ninstrs
; op_id
++) {
2088 const struct nand_operation nand_op
= {
2090 .instrs
= &subop
->instrs
[op_id
],
2092 ret
= chip
->controller
->ops
->exec_op(chip
, &nand_op
, false);
2100 static int cadence_nand_cmd_data(struct nand_chip
*chip
,
2101 const struct nand_subop
*subop
)
2103 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
2104 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
2105 const struct nand_op_instr
*instr
;
2106 unsigned int offset
, op_id
= 0;
2107 u64 mini_ctrl_cmd
= 0;
2111 instr
= &subop
->instrs
[op_id
];
2113 if (instr
->delay_ns
> 0)
2114 mini_ctrl_cmd
|= GCMD_LAY_TWB
;
2116 mini_ctrl_cmd
|= FIELD_PREP(GCMD_LAY_INSTR
,
2117 GCMD_LAY_INSTR_DATA
);
2119 if (instr
->type
== NAND_OP_DATA_OUT_INSTR
)
2120 mini_ctrl_cmd
|= FIELD_PREP(GCMD_DIR
,
2123 len
= nand_subop_get_data_len(subop
, op_id
);
2124 offset
= nand_subop_get_data_start_off(subop
, op_id
);
2125 mini_ctrl_cmd
|= FIELD_PREP(GCMD_SECT_CNT
, 1);
2126 mini_ctrl_cmd
|= FIELD_PREP(GCMD_LAST_SIZE
, len
);
2127 if (instr
->ctx
.data
.force_8bit
) {
2128 ret
= cadence_nand_force_byte_access(chip
, true);
2130 dev_err(cdns_ctrl
->dev
,
2131 "cannot change byte access generic data cmd failed\n");
2136 ret
= cadence_nand_generic_cmd_send(cdns_ctrl
,
2137 cdns_chip
->cs
[chip
->cur_cs
],
2140 dev_err(cdns_ctrl
->dev
, "send generic data cmd failed\n");
2144 if (instr
->type
== NAND_OP_DATA_IN_INSTR
) {
2145 void *buf
= instr
->ctx
.data
.buf
.in
+ offset
;
2147 ret
= cadence_nand_read_buf(cdns_ctrl
, buf
, len
);
2149 const void *buf
= instr
->ctx
.data
.buf
.out
+ offset
;
2151 ret
= cadence_nand_write_buf(cdns_ctrl
, buf
, len
);
2155 dev_err(cdns_ctrl
->dev
, "data transfer failed for generic command\n");
2159 if (instr
->ctx
.data
.force_8bit
) {
2160 ret
= cadence_nand_force_byte_access(chip
, false);
2162 dev_err(cdns_ctrl
->dev
,
2163 "cannot change byte access generic data cmd failed\n");
2170 static int cadence_nand_cmd_waitrdy(struct nand_chip
*chip
,
2171 const struct nand_subop
*subop
)
2174 unsigned int op_id
= 0;
2175 struct cdns_nand_ctrl
*cdns_ctrl
= to_cdns_nand_ctrl(chip
->controller
);
2176 struct cdns_nand_chip
*cdns_chip
= to_cdns_nand_chip(chip
);
2177 const struct nand_op_instr
*instr
= &subop
->instrs
[op_id
];
2178 u32 timeout_us
= instr
->ctx
.waitrdy
.timeout_ms
* 1000;
2180 status
= cadence_nand_wait_for_value(cdns_ctrl
, RBN_SETINGS
,
2182 BIT(cdns_chip
->cs
[chip
->cur_cs
]),
static const struct nand_op_parser cadence_nand_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(
		cadence_nand_cmd_erase,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ERASE_ADDRESS_CYC),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		cadence_nand_cmd_opcode,
		NAND_OP_PARSER_PAT_CMD_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		cadence_nand_cmd_address,
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC)),
	NAND_OP_PARSER_PATTERN(
		cadence_nand_cmd_data,
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_DATA_SIZE)),
	NAND_OP_PARSER_PATTERN(
		cadence_nand_cmd_data,
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE)),
	NAND_OP_PARSER_PATTERN(
		cadence_nand_cmd_waitrdy,
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false))
	);
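
/*
 * exec_op entry point: the target is selected first, then the request is
 * dispatched through the parser above. The check_only flag is passed
 * through, so the NAND core can probe whether a sequence is supported
 * without actually running it.
 */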
static int cadence_nand_exec_op(struct nand_chip *chip,
				const struct nand_operation *op,
				bool check_only)
{
	int status = cadence_nand_select_target(chip);

	if (status)
		return status;

	return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op,
				      check_only);
}
static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);

	if (section)
		return -ERANGE;

	oobregion->offset = cdns_chip->bbm_len;
	oobregion->length = cdns_chip->avail_oob_size
		- cdns_chip->bbm_len;

	return 0;
}
static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);

	if (section)
		return -ERANGE;

	oobregion->offset = cdns_chip->avail_oob_size;
	oobregion->length = chip->ecc.total;

	return 0;
}
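
/*
 * Resulting OOB layout, using hypothetical numbers for illustration: with
 * bbm_len = 2, avail_oob_size = 16 and ecc.total = 208 on a 224-byte OOB,
 * the free region is bytes 2..15 (offset bbm_len, length
 * avail_oob_size - bbm_len) and the ECC region starts at byte 16
 * (offset avail_oob_size) with a length of ecc.total bytes.
 */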
static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = {
	.free = cadence_nand_ooblayout_free,
	.ecc = cadence_nand_ooblayout_ecc,
};
static int calc_cycl(u32 timing, u32 clock)
{
	if (timing == 0 || clock == 0)
		return 0;

	if ((timing % clock) > 0)
		return timing / clock;
	else
		return timing / clock - 1;
}
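
/*
 * Example (assumed values): with clk_period = 10000 ps (100 MHz) and
 * timing = 25000 ps, 25000 % 10000 != 0 so calc_cycl() returns 2. The
 * timing registers below are programmed with such N-1 style counts; the
 * callers account for the extra cycle with the (cnt + 1) * clk_period terms.
 */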
/* Calculate max data valid window. */
static inline u32
calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
	      u32 board_delay_skew_min, u32 ext_mode)
{
	if (ext_mode == 0)
		clk_period /= 2;

	return (trp_cnt + 1) * clk_period + trhoh_min +
		board_delay_skew_min;
}
/* Calculate data valid window. */
static inline u32
calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
	  u32 trea_max, u32 ext_mode)
{
	if (ext_mode == 0)
		clk_period /= 2;

	return (trp_cnt + 1) * clk_period + trhoh_min - trea_max;
}
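
/*
 * Worked example with assumed values: trp_cnt = 2, clk_period = 10000 ps,
 * tRHOH_min = 15000 ps, tREA_max = 16000 ps and ext_mode = 1 give
 * tdvw = (2 + 1) * 10000 + 15000 - 16000 = 29000 ps, i.e. the window in
 * which read data is guaranteed valid and a sampling point must be placed.
 */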
static int
cadence_nand_setup_data_interface(struct nand_chip *chip, int chipnr,
				  const struct nand_data_interface *conf)
{
	const struct nand_sdr_timings *sdr;
	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	struct cadence_nand_timings *t = &cdns_chip->timings;
	u32 reg;
	u32 board_delay = cdns_ctrl->board_delay;
	u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL,
					    cdns_ctrl->nf_clk_rate);
	u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt;
	u32 tfeat_cnt, trhz_cnt, tvdly_cnt;
	u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt;
	u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0;
	u32 if_skew = cdns_ctrl->caps1->if_skew;
	u32 board_delay_skew_min = board_delay - if_skew;
	u32 board_delay_skew_max = board_delay + if_skew;
	u32 dqs_sampl_res, phony_dqs_mod;
	u32 tdvw, tdvw_min, tdvw_max;
	u32 ext_rd_mode, ext_wr_mode;
	u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0;
	u32 sampling_point;

	sdr = nand_get_sdr_timings(conf);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	memset(t, 0, sizeof(*t));
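
	/*
	 * All timings below are handled in picoseconds; e.g. (assumed value)
	 * nf_clk_rate = 100000000 Hz gives clk_period = 10000 ps per cycle.
	 */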
	/* Sampling point calculation. */
	if (cdns_ctrl->caps2.is_phy_type_dll)
		phony_dqs_mod = 2;
	else
		phony_dqs_mod = 1;

	dqs_sampl_res = clk_period / phony_dqs_mod;

	tdvw_min = sdr->tREA_max + board_delay_skew_max;
	/*
	 * The idea of this calculation is to get the optimum values
	 * for tRP and tRH timings. If it is NOT possible to sample data
	 * with optimal tRP/tRH settings, the parameters will be extended.
	 * If clk_period is 50ns (the lowest value) this condition is met
	 * for asynchronous timing modes 1, 2, 3, 4 and 5.
	 * If clk_period is 20ns the condition is met only
	 * for asynchronous timing mode 5.
	 */
	if (sdr->tRC_min <= clk_period &&
	    sdr->tRP_min <= (clk_period / 2) &&
	    sdr->tREH_min <= (clk_period / 2)) {
		/* Performance mode. */
		ext_rd_mode = 0;
		tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
				 sdr->tREA_max, ext_rd_mode);
		tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min,
					 board_delay_skew_min,
					 ext_rd_mode);
		/*
		 * Check if the data valid window and sampling point can be
		 * found and are not on the edge (i.e. we have hold margin).
		 * If not, extend the tRP timings.
		 */
		if (tdvw > 0) {
			if (tdvw_max <= tdvw_min ||
			    (tdvw_max % dqs_sampl_res) == 0) {
				/*
				 * No valid sampling point, so the RE pulse
				 * needs to be widened by half a clock cycle.
				 */
				ext_rd_mode = 1;
			}
		} else {
			/*
			 * There is no valid window to sample data, so tRP
			 * needs to be widened. Very safe calculations are
			 * performed here.
			 */
			trp_cnt = (sdr->tREA_max + board_delay_skew_max
				   + dqs_sampl_res) / clk_period;
			ext_rd_mode = 1;
		}

	} else {
		/* Extended read mode. */
		u32 trh;

		ext_rd_mode = 1;
		trp_cnt = calc_cycl(sdr->tRP_min, clk_period);
		trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period);
		if (sdr->tREH_min >= trh)
			trh_cnt = calc_cycl(sdr->tREH_min, clk_period);
		else
			trh_cnt = calc_cycl(trh, clk_period);

		tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
				 sdr->tREA_max, ext_rd_mode);
		/*
		 * Check if the data valid window and sampling point can be
		 * found, or, if it is at the edge, check if the previous one
		 * is valid - if not, extend the tRP timings.
		 */
		if (tdvw > 0) {
			tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
						 sdr->tRHOH_min,
						 board_delay_skew_min,
						 ext_rd_mode);

			if ((((tdvw_max / dqs_sampl_res)
			      * dqs_sampl_res) <= tdvw_min) ||
			    (((tdvw_max % dqs_sampl_res) == 0) &&
			     (((tdvw_max / dqs_sampl_res - 1)
			       * dqs_sampl_res) <= tdvw_min))) {
				/*
				 * The data valid window width is lower than
				 * the sampling resolution and does not hit
				 * any sampling point. To be sure a sampling
				 * point will be found, the RE low pulse width
				 * is extended by one clock cycle.
				 */
				trp_cnt = trp_cnt + 1;
			}
		} else {
			/*
			 * There is no valid window to sample data.
			 * tRP needs to be widened.
			 * Very safe calculations are performed here.
			 */
			trp_cnt = (sdr->tREA_max + board_delay_skew_max
				   + dqs_sampl_res) / clk_period;
		}
	}

	tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
				 sdr->tRHOH_min,
				 board_delay_skew_min, ext_rd_mode);
	if (sdr->tWC_min <= clk_period &&
	    (sdr->tWP_min + if_skew) <= (clk_period / 2) &&
	    (sdr->tWH_min + if_skew) <= (clk_period / 2)) {
		ext_wr_mode = 0;
	} else {
		u32 twh;

		ext_wr_mode = 1;
		twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period);
		if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew))
			twp_cnt = calc_cycl(sdr->tALS_min + if_skew,
					    clk_period);

		twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period);
		if (sdr->tWH_min >= twh)
			twh = sdr->tWH_min;

		twh_cnt = calc_cycl(twh + if_skew, clk_period);
	}
	reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt);
	reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt);
	reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt);
	reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt);
	t->async_toggle_timings = reg;
	dev_dbg(cdns_ctrl->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg);

	tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period);
	tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period);
	twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period);
	trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period);
	reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);

	/*
	 * If a timing exceeds the delay field in the timing register,
	 * use the maximum value.
	 */
	if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt))
		reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
	else
		reg |= TIMINGS0_TCCS;

	reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
	reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
	t->timings0 = reg;
	dev_dbg(cdns_ctrl->dev, "TIMINGS0_SDR\t%x\n", reg);
	/* The following is related to a single signal so skew is not needed. */
	trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period);
	trhz_cnt = trhz_cnt + 1;
	twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period);
	/*
	 * Because of the two-stage syncflop the value must be increased:
	 * the first term (3) accounts for the synchronization, the second
	 * term (5) for the output interface delay.
	 */
	twb_cnt = twb_cnt + 3 + 5;
	/*
	 * The following is related to the WE# edge of the random data input
	 * sequence so skew is not needed.
	 */
	tvdly_cnt = calc_cycl(500000 + if_skew, clk_period);
	reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
	reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt);
	reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
	t->timings1 = reg;
	dev_dbg(cdns_ctrl->dev, "TIMINGS1_SDR\t%x\n", reg);
	tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period);
	if (tfeat_cnt < twb_cnt)
		tfeat_cnt = twb_cnt;

	tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period);
	tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period);

	reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
	reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
	reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
	t->timings2 = reg;
	dev_dbg(cdns_ctrl->dev, "TIMINGS2_SDR\t%x\n", reg);
	if (cdns_ctrl->caps2.is_phy_type_dll) {
		reg = DLL_PHY_CTRL_DLL_RST_N;
		if (ext_wr_mode)
			reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE;
		if (ext_rd_mode)
			reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE;

		reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7);
		reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7);
		t->dll_phy_ctrl = reg;
		dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg);
	}
	/* Sampling point calculation. */
	if ((tdvw_max % dqs_sampl_res) > 0)
		sampling_point = tdvw_max / dqs_sampl_res;
	else
		sampling_point = (tdvw_max / dqs_sampl_res - 1);

	if (sampling_point * dqs_sampl_res > tdvw_min) {
		dll_phy_dqs_timing =
			FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4);
		dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS;
		phony_dqs_timing = sampling_point / phony_dqs_mod;

		if ((sampling_point % 2) > 0) {
			dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL;
			if ((tdvw_max % dqs_sampl_res) == 0)
				/*
				 * Calculation for a sampling point at the
				 * edge of data, being an odd number.
				 */
				phony_dqs_timing = (tdvw_max / dqs_sampl_res)
					/ phony_dqs_mod - 1;

			if (!cdns_ctrl->caps2.is_phy_type_dll)
				phony_dqs_timing--;
		} else {
			phony_dqs_timing--;
		}
		rd_del_sel = phony_dqs_timing + 3;
	} else {
		dev_warn(cdns_ctrl->dev,
			 "ERROR : cannot find valid sampling point\n");
	}
	reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing);
	if (cdns_ctrl->caps2.is_phy_type_dll)
		reg |= PHY_CTRL_SDR_DQS;
	t->phy_ctrl = reg;
	dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_SDR\t%x\n", reg);

	if (cdns_ctrl->caps2.is_phy_type_dll) {
		dev_dbg(cdns_ctrl->dev, "PHY_TSEL_REG_SDR\t%x\n", 0);
		dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2);
		dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n",
			dll_phy_dqs_timing);
		t->phy_dqs_timing = dll_phy_dqs_timing;

		reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel);
		dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n",
			reg);
		t->phy_gate_lpbk_ctrl = reg;

		dev_dbg(cdns_ctrl->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n",
			PHY_DLL_MASTER_CTRL_BYPASS_MODE);
		dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0);
	}

	return 0;
}
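
/*
 * attach_chip: called by the NAND core once the chip has been identified.
 * The steps below pick the ECC step size/strength from the controller
 * capabilities (nand_ecc_choose_conf), derive the per-sector layout and
 * available OOB bytes, program the ECC strength and erased-page detection
 * in the controller, and replace the default page/OOB accessors with the
 * controller-specific ones.
 */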
int cadence_nand_attach_chip(struct nand_chip *chip)
{
	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	u32 ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
	struct mtd_info *mtd = nand_to_mtd(chip);
	u32 max_oob_data_size;
	int ret;

	if (chip->options & NAND_BUSWIDTH_16) {
		ret = cadence_nand_set_access_width16(cdns_ctrl, true);
		if (ret)
			return ret;
	}

	chip->bbt_options |= NAND_BBT_USE_FLASH;
	chip->bbt_options |= NAND_BBT_NO_OOB;
	chip->ecc.mode = NAND_ECC_HW;

	chip->options |= NAND_NO_SUBPAGE_WRITE;

	cdns_chip->bbm_offs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		cdns_chip->bbm_offs &= ~0x01;
		cdns_chip->bbm_len = 2;
	} else {
		cdns_chip->bbm_len = 1;
	}

	ret = nand_ecc_choose_conf(chip,
				   &cdns_ctrl->ecc_caps,
				   mtd->oobsize - cdns_chip->bbm_len);
	if (ret) {
		dev_err(cdns_ctrl->dev, "ECC configuration failed\n");
		return ret;
	}

	dev_dbg(cdns_ctrl->dev,
		"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
		chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
	/* Error correction configuration. */
	cdns_chip->sector_size = chip->ecc.size;
	cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
	ecc_size = cdns_chip->sector_count * chip->ecc.bytes;

	cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;

	max_oob_data_size = MAX_OOB_SIZE_PER_SECTOR;

	if (cdns_chip->avail_oob_size > max_oob_data_size)
		cdns_chip->avail_oob_size = max_oob_data_size;

	if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
	    > mtd->oobsize)
		cdns_chip->avail_oob_size -= 4;
	ret = cadence_nand_get_ecc_strength_idx(cdns_ctrl, chip->ecc.strength);
	if (ret < 0)
		return -EINVAL;

	cdns_chip->corr_str_idx = (u8)ret;

	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
					1000000,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	cadence_nand_set_ecc_strength(cdns_ctrl,
				      cdns_chip->corr_str_idx);

	cadence_nand_set_erase_detection(cdns_ctrl, true,
					 chip->ecc.strength);

	/* Override the default read operations. */
	chip->ecc.read_page = cadence_nand_read_page;
	chip->ecc.read_page_raw = cadence_nand_read_page_raw;
	chip->ecc.write_page = cadence_nand_write_page;
	chip->ecc.write_page_raw = cadence_nand_write_page_raw;
	chip->ecc.read_oob = cadence_nand_read_oob;
	chip->ecc.write_oob = cadence_nand_write_oob;
	chip->ecc.read_oob_raw = cadence_nand_read_oob_raw;
	chip->ecc.write_oob_raw = cadence_nand_write_oob_raw;

	if ((mtd->writesize + mtd->oobsize) > cdns_ctrl->buf_size)
		cdns_ctrl->buf_size = mtd->writesize + mtd->oobsize;

	/* Is 32-bit DMA supported? */
	ret = dma_set_mask(cdns_ctrl->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(cdns_ctrl->dev, "no usable DMA configuration\n");
		return ret;
	}

	mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops);

	return 0;
}
static const struct nand_controller_ops cadence_nand_controller_ops = {
	.attach_chip = cadence_nand_attach_chip,
	.exec_op = cadence_nand_exec_op,
	.setup_data_interface = cadence_nand_setup_data_interface,
};
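
/*
 * Each child node of the controller describes one NAND chip; its "reg"
 * property lists the chip-select lines it occupies. A minimal, hypothetical
 * binding fragment (names are illustrative only):
 *
 *	nand-controller {
 *		compatible = "cdns,hp-nfc";
 *		...
 *		nand@0 {
 *			reg = <0>;
 *		};
 *	};
 */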
static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
				  struct device_node *np)
{
	struct cdns_nand_chip *cdns_chip;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int nsels, ret, i;
	u32 cs;

	nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
	if (nsels <= 0) {
		dev_err(cdns_ctrl->dev, "missing/invalid reg property\n");
		return -EINVAL;
	}

	/* Allocate the nand chip structure. */
	cdns_chip = devm_kzalloc(cdns_ctrl->dev, sizeof(*cdns_chip) +
				 (nsels * sizeof(u8)),
				 GFP_KERNEL);
	if (!cdns_chip) {
		dev_err(cdns_ctrl->dev, "could not allocate chip structure\n");
		return -ENOMEM;
	}

	cdns_chip->nsels = nsels;

	for (i = 0; i < nsels; i++) {
		/* Retrieve CS id. */
		ret = of_property_read_u32_index(np, "reg", i, &cs);
		if (ret) {
			dev_err(cdns_ctrl->dev,
				"could not retrieve reg property: %d\n",
				ret);
			return ret;
		}

		if (cs >= cdns_ctrl->caps2.max_banks) {
			dev_err(cdns_ctrl->dev,
				"invalid reg value: %u (max CS = %d)\n",
				cs, cdns_ctrl->caps2.max_banks);
			return -EINVAL;
		}

		if (test_and_set_bit(cs, &cdns_ctrl->assigned_cs)) {
			dev_err(cdns_ctrl->dev,
				"CS %d already assigned\n", cs);
			return -EINVAL;
		}

		cdns_chip->cs[i] = cs;
	}

	chip = &cdns_chip->chip;
	chip->controller = &cdns_ctrl->controller;
	nand_set_flash_node(chip, np);

	mtd = nand_to_mtd(chip);
	mtd->dev.parent = cdns_ctrl->dev;

	/*
	 * Default to HW ECC engine mode. If the nand-ecc-mode property is
	 * given in the DT node, this entry will be overwritten in
	 * nand_scan_ident().
	 */
	chip->ecc.mode = NAND_ECC_HW;

	ret = nand_scan(chip, cdns_chip->nsels);
	if (ret) {
		dev_err(cdns_ctrl->dev, "could not scan the nand chip\n");
		return ret;
	}

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(cdns_ctrl->dev,
			"failed to register mtd device: %d\n", ret);
		nand_cleanup(chip);
		return ret;
	}

	list_add_tail(&cdns_chip->node, &cdns_ctrl->chips);

	return 0;
}
static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
{
	struct cdns_nand_chip *entry, *temp;

	list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) {
		nand_release(&entry->chip);
		list_del(&entry->node);
	}
}
static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
{
	struct device_node *np = cdns_ctrl->dev->of_node;
	struct device_node *nand_np;
	int max_cs = cdns_ctrl->caps2.max_banks;
	int nchips, ret;

	nchips = of_get_child_count(np);

	if (nchips > max_cs) {
		dev_err(cdns_ctrl->dev,
			"too many NAND chips: %d (max = %d CS)\n",
			nchips, max_cs);
		return -EINVAL;
	}

	for_each_child_of_node(np, nand_np) {
		ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
		if (ret) {
			of_node_put(nand_np);
			cadence_nand_chips_cleanup(cdns_ctrl);
			return ret;
		}
	}

	return 0;
}
static void
cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
{
	/* Disable interrupts. */
	writel_relaxed(INTR_ENABLE_INTR_EN, cdns_ctrl->reg + INTR_ENABLE);
}
static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
{
	dma_cap_mask_t mask;
	int ret;

	cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
						  sizeof(*cdns_ctrl->cdma_desc),
						  &cdns_ctrl->dma_cdma_desc,
						  GFP_KERNEL);
	if (!cdns_ctrl->dma_cdma_desc)
		return -ENOMEM;

	cdns_ctrl->buf_size = SZ_16K;
	cdns_ctrl->buf = kmalloc(cdns_ctrl->buf_size, GFP_KERNEL);
	if (!cdns_ctrl->buf) {
		ret = -ENOMEM;
		goto free_buf_desc;
	}

	if (devm_request_irq(cdns_ctrl->dev, cdns_ctrl->irq, cadence_nand_isr,
			     IRQF_SHARED, "cadence-nand-controller",
			     cdns_ctrl)) {
		dev_err(cdns_ctrl->dev, "Unable to allocate IRQ\n");
		ret = -ENODEV;
		goto free_buf;
	}

	spin_lock_init(&cdns_ctrl->irq_lock);
	init_completion(&cdns_ctrl->complete);

	ret = cadence_nand_hw_init(cdns_ctrl);
	if (ret)
		goto disable_irq;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	if (cdns_ctrl->caps1->has_dma) {
		cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
		if (!cdns_ctrl->dmac) {
			dev_err(cdns_ctrl->dev,
				"Unable to get a DMA channel\n");
			ret = -EBUSY;
			goto disable_irq;
		}
	}

	nand_controller_init(&cdns_ctrl->controller);
	INIT_LIST_HEAD(&cdns_ctrl->chips);

	cdns_ctrl->controller.ops = &cadence_nand_controller_ops;
	cdns_ctrl->curr_corr_str_idx = 0xFF;

	ret = cadence_nand_chips_init(cdns_ctrl);
	if (ret) {
		dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
			ret);
		goto dma_release_chnl;
	}

	kfree(cdns_ctrl->buf);
	cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
	if (!cdns_ctrl->buf) {
		ret = -ENOMEM;
		goto dma_release_chnl;
	}

	return 0;

dma_release_chnl:
	if (cdns_ctrl->dmac)
		dma_release_channel(cdns_ctrl->dmac);

disable_irq:
	cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);

free_buf:
	kfree(cdns_ctrl->buf);

free_buf_desc:
	dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
			  cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);

	return ret;
}
/* Driver exit point. */
static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
{
	cadence_nand_chips_cleanup(cdns_ctrl);
	cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
	kfree(cdns_ctrl->buf);
	dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
			  cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);

	if (cdns_ctrl->dmac)
		dma_release_channel(cdns_ctrl->dmac);
}
struct cadence_nand_dt {
	struct cdns_nand_ctrl cdns_ctrl;
	struct clk *clk;
};

static const struct cadence_nand_dt_devdata cadence_nand_default = {
	.if_skew = 0,
	.has_dma = 1,
};

static const struct of_device_id cadence_nand_dt_ids[] = {
	{
		.compatible = "cdns,hp-nfc",
		.data = &cadence_nand_default
	}, {}
};

MODULE_DEVICE_TABLE(of, cadence_nand_dt_ids);
static int cadence_nand_dt_probe(struct platform_device *ofdev)
{
	struct resource *res;
	struct cadence_nand_dt *dt;
	struct cdns_nand_ctrl *cdns_ctrl;
	int ret;
	const struct of_device_id *of_id;
	const struct cadence_nand_dt_devdata *devdata;
	u32 val;

	of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev);
	if (of_id) {
		ofdev->id_entry = of_id->data;
		devdata = of_id->data;
	} else {
		pr_err("Failed to find the right device id.\n");
		return -ENOMEM;
	}

	dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
	if (!dt)
		return -ENOMEM;

	cdns_ctrl = &dt->cdns_ctrl;
	cdns_ctrl->caps1 = devdata;

	cdns_ctrl->dev = &ofdev->dev;
	cdns_ctrl->irq = platform_get_irq(ofdev, 0);
	if (cdns_ctrl->irq < 0)
		return cdns_ctrl->irq;

	dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq);

	cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0);
	if (IS_ERR(cdns_ctrl->reg)) {
		dev_err(&ofdev->dev, "devm_ioremap_resource res 0 failed\n");
		return PTR_ERR(cdns_ctrl->reg);
	}

	res = platform_get_resource(ofdev, IORESOURCE_MEM, 1);
	cdns_ctrl->io.dma = res->start;
	cdns_ctrl->io.virt = devm_ioremap_resource(&ofdev->dev, res);
	if (IS_ERR(cdns_ctrl->io.virt)) {
		dev_err(cdns_ctrl->dev, "devm_ioremap_resource res 1 failed\n");
		return PTR_ERR(cdns_ctrl->io.virt);
	}

	dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
	if (IS_ERR(dt->clk))
		return PTR_ERR(dt->clk);

	cdns_ctrl->nf_clk_rate = clk_get_rate(dt->clk);
	ret = of_property_read_u32(ofdev->dev.of_node,
				   "cdns,board-delay-ps", &val);
	if (ret) {
		val = 4830; /* Default board delay in ps. */
		dev_info(cdns_ctrl->dev,
			 "missing cdns,board-delay-ps property, %d was set\n",
			 val);
	}
	cdns_ctrl->board_delay = val;
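
	/*
	 * board_delay is expressed in picoseconds and is combined with the
	 * interface skew (caps1->if_skew) in setup_data_interface() to build
	 * the board_delay_skew_min/max window used for read sampling.
	 */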
	ret = cadence_nand_init(cdns_ctrl);
	if (ret)
		return ret;

	platform_set_drvdata(ofdev, dt);
	return 0;
}
static int cadence_nand_dt_remove(struct platform_device *ofdev)
{
	struct cadence_nand_dt *dt = platform_get_drvdata(ofdev);

	cadence_nand_remove(&dt->cdns_ctrl);

	return 0;
}
static struct platform_driver cadence_nand_dt_driver = {
	.probe		= cadence_nand_dt_probe,
	.remove		= cadence_nand_dt_remove,
	.driver		= {
		.name	= "cadence-nand-controller",
		.of_match_table = cadence_nand_dt_ids,
	},
};

module_platform_driver(cadence_nand_dt_driver);
MODULE_AUTHOR("Piotr Sroka <piotrs@cadence.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Driver for Cadence NAND flash controller");