2 * Copyright © 2010-2015 Broadcom Corporation
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
14 #include <linux/clk.h>
15 #include <linux/version.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/platform_device.h>
21 #include <linux/err.h>
22 #include <linux/completion.h>
23 #include <linux/interrupt.h>
24 #include <linux/spinlock.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/ioport.h>
27 #include <linux/bug.h>
28 #include <linux/kernel.h>
29 #include <linux/bitops.h>
31 #include <linux/mtd/mtd.h>
32 #include <linux/mtd/nand.h>
33 #include <linux/mtd/partitions.h>
35 #include <linux/of_platform.h>
36 #include <linux/slab.h>
37 #include <linux/list.h>
38 #include <linux/log2.h>
43 * This flag controls if WP stays on between erase/write commands to mitigate
44 * flash corruption due to power glitches. Values:
45 * 0: NAND_WP is not used or not available
46 * 1: NAND_WP is set by default, cleared for erase/write operations
47 * 2: NAND_WP is always cleared
50 module_param(wp_on
, int, 0444);
52 /***********************************************************************
54 ***********************************************************************/
56 #define DRV_NAME "brcmnand"
59 #define CMD_PAGE_READ 0x01
60 #define CMD_SPARE_AREA_READ 0x02
61 #define CMD_STATUS_READ 0x03
62 #define CMD_PROGRAM_PAGE 0x04
63 #define CMD_PROGRAM_SPARE_AREA 0x05
64 #define CMD_COPY_BACK 0x06
65 #define CMD_DEVICE_ID_READ 0x07
66 #define CMD_BLOCK_ERASE 0x08
67 #define CMD_FLASH_RESET 0x09
68 #define CMD_BLOCKS_LOCK 0x0a
69 #define CMD_BLOCKS_LOCK_DOWN 0x0b
70 #define CMD_BLOCKS_UNLOCK 0x0c
71 #define CMD_READ_BLOCKS_LOCK_STATUS 0x0d
72 #define CMD_PARAMETER_READ 0x0e
73 #define CMD_PARAMETER_CHANGE_COL 0x0f
74 #define CMD_LOW_LEVEL_OP 0x10
76 struct brcm_nand_dma_desc
{
91 /* Bitfields for brcm_nand_dma_desc::status_valid */
92 #define FLASH_DMA_ECC_ERROR (1 << 8)
93 #define FLASH_DMA_CORR_ERROR (1 << 9)
95 /* 512B flash cache in the NAND controller HW */
98 #define FC_WORDS (FC_BYTES >> 2)
100 #define BRCMNAND_MIN_PAGESIZE 512
101 #define BRCMNAND_MIN_BLOCKSIZE (8 * 1024)
102 #define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
104 #define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
105 #define NAND_POLL_STATUS_TIMEOUT_MS 100
107 /* Controller feature flags */
109 BRCMNAND_HAS_1K_SECTORS
= BIT(0),
110 BRCMNAND_HAS_PREFETCH
= BIT(1),
111 BRCMNAND_HAS_CACHE_MODE
= BIT(2),
112 BRCMNAND_HAS_WP
= BIT(3),
115 struct brcmnand_controller
{
117 struct nand_hw_control controller
;
118 void __iomem
*nand_base
;
119 void __iomem
*nand_fc
; /* flash cache */
120 void __iomem
*flash_dma_base
;
122 unsigned int dma_irq
;
125 /* Some SoCs provide custom interrupt status register(s) */
126 struct brcmnand_soc
*soc
;
128 /* Some SoCs have a gateable clock for the controller */
133 struct completion done
;
134 struct completion dma_done
;
136 /* List of NAND hosts (one for each chip-select) */
137 struct list_head host_list
;
139 struct brcm_nand_dma_desc
*dma_desc
;
142 /* in-memory cache of the FLASH_CACHE, used only for some commands */
143 u8 flash_cache
[FC_BYTES
];
145 /* Controller revision details */
146 const u16
*reg_offsets
;
147 unsigned int reg_spacing
; /* between CS1, CS2, ... regs */
148 const u8
*cs_offsets
; /* within each chip-select */
149 const u8
*cs0_offsets
; /* within CS0, if different */
150 unsigned int max_block_size
;
151 const unsigned int *block_sizes
;
152 unsigned int max_page_size
;
153 const unsigned int *page_sizes
;
154 unsigned int max_oob
;
157 /* for low-power standby/resume only */
158 u32 nand_cs_nand_select
;
159 u32 nand_cs_nand_xor
;
160 u32 corr_stat_threshold
;
164 struct brcmnand_cfg
{
166 unsigned int block_size
;
167 unsigned int page_size
;
168 unsigned int spare_area_size
;
169 unsigned int device_width
;
170 unsigned int col_adr_bytes
;
171 unsigned int blk_adr_bytes
;
172 unsigned int ful_adr_bytes
;
173 unsigned int sector_size_1k
;
174 unsigned int ecc_level
;
175 /* use for low-power standby/resume only */
183 struct brcmnand_host
{
184 struct list_head node
;
186 struct nand_chip chip
;
187 struct platform_device
*pdev
;
190 unsigned int last_cmd
;
191 unsigned int last_byte
;
193 struct brcmnand_cfg hwcfg
;
194 struct brcmnand_controller
*ctrl
;
198 BRCMNAND_CMD_START
= 0,
199 BRCMNAND_CMD_EXT_ADDRESS
,
200 BRCMNAND_CMD_ADDRESS
,
201 BRCMNAND_INTFC_STATUS
,
206 BRCMNAND_CS1_BASE
, /* CS1 regs, if non-contiguous */
207 BRCMNAND_CORR_THRESHOLD
,
208 BRCMNAND_CORR_THRESHOLD_EXT
,
209 BRCMNAND_UNCORR_COUNT
,
211 BRCMNAND_CORR_EXT_ADDR
,
213 BRCMNAND_UNCORR_EXT_ADDR
,
214 BRCMNAND_UNCORR_ADDR
,
219 BRCMNAND_OOB_READ_BASE
,
220 BRCMNAND_OOB_READ_10_BASE
, /* offset 0x10, if non-contiguous */
221 BRCMNAND_OOB_WRITE_BASE
,
222 BRCMNAND_OOB_WRITE_10_BASE
, /* offset 0x10, if non-contiguous */
227 static const u16 brcmnand_regs_v40
[] = {
228 [BRCMNAND_CMD_START
] = 0x04,
229 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
230 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
231 [BRCMNAND_INTFC_STATUS
] = 0x6c,
232 [BRCMNAND_CS_SELECT
] = 0x14,
233 [BRCMNAND_CS_XOR
] = 0x18,
234 [BRCMNAND_LL_OP
] = 0x178,
235 [BRCMNAND_CS0_BASE
] = 0x40,
236 [BRCMNAND_CS1_BASE
] = 0xd0,
237 [BRCMNAND_CORR_THRESHOLD
] = 0x84,
238 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0,
239 [BRCMNAND_UNCORR_COUNT
] = 0,
240 [BRCMNAND_CORR_COUNT
] = 0,
241 [BRCMNAND_CORR_EXT_ADDR
] = 0x70,
242 [BRCMNAND_CORR_ADDR
] = 0x74,
243 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x78,
244 [BRCMNAND_UNCORR_ADDR
] = 0x7c,
245 [BRCMNAND_SEMAPHORE
] = 0x58,
246 [BRCMNAND_ID
] = 0x60,
247 [BRCMNAND_ID_EXT
] = 0x64,
248 [BRCMNAND_LL_RDATA
] = 0x17c,
249 [BRCMNAND_OOB_READ_BASE
] = 0x20,
250 [BRCMNAND_OOB_READ_10_BASE
] = 0x130,
251 [BRCMNAND_OOB_WRITE_BASE
] = 0x30,
252 [BRCMNAND_OOB_WRITE_10_BASE
] = 0,
253 [BRCMNAND_FC_BASE
] = 0x200,
257 static const u16 brcmnand_regs_v50
[] = {
258 [BRCMNAND_CMD_START
] = 0x04,
259 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
260 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
261 [BRCMNAND_INTFC_STATUS
] = 0x6c,
262 [BRCMNAND_CS_SELECT
] = 0x14,
263 [BRCMNAND_CS_XOR
] = 0x18,
264 [BRCMNAND_LL_OP
] = 0x178,
265 [BRCMNAND_CS0_BASE
] = 0x40,
266 [BRCMNAND_CS1_BASE
] = 0xd0,
267 [BRCMNAND_CORR_THRESHOLD
] = 0x84,
268 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0,
269 [BRCMNAND_UNCORR_COUNT
] = 0,
270 [BRCMNAND_CORR_COUNT
] = 0,
271 [BRCMNAND_CORR_EXT_ADDR
] = 0x70,
272 [BRCMNAND_CORR_ADDR
] = 0x74,
273 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x78,
274 [BRCMNAND_UNCORR_ADDR
] = 0x7c,
275 [BRCMNAND_SEMAPHORE
] = 0x58,
276 [BRCMNAND_ID
] = 0x60,
277 [BRCMNAND_ID_EXT
] = 0x64,
278 [BRCMNAND_LL_RDATA
] = 0x17c,
279 [BRCMNAND_OOB_READ_BASE
] = 0x20,
280 [BRCMNAND_OOB_READ_10_BASE
] = 0x130,
281 [BRCMNAND_OOB_WRITE_BASE
] = 0x30,
282 [BRCMNAND_OOB_WRITE_10_BASE
] = 0x140,
283 [BRCMNAND_FC_BASE
] = 0x200,
286 /* BRCMNAND v6.0 - v7.1 */
287 static const u16 brcmnand_regs_v60
[] = {
288 [BRCMNAND_CMD_START
] = 0x04,
289 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
290 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
291 [BRCMNAND_INTFC_STATUS
] = 0x14,
292 [BRCMNAND_CS_SELECT
] = 0x18,
293 [BRCMNAND_CS_XOR
] = 0x1c,
294 [BRCMNAND_LL_OP
] = 0x20,
295 [BRCMNAND_CS0_BASE
] = 0x50,
296 [BRCMNAND_CS1_BASE
] = 0,
297 [BRCMNAND_CORR_THRESHOLD
] = 0xc0,
298 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0xc4,
299 [BRCMNAND_UNCORR_COUNT
] = 0xfc,
300 [BRCMNAND_CORR_COUNT
] = 0x100,
301 [BRCMNAND_CORR_EXT_ADDR
] = 0x10c,
302 [BRCMNAND_CORR_ADDR
] = 0x110,
303 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x114,
304 [BRCMNAND_UNCORR_ADDR
] = 0x118,
305 [BRCMNAND_SEMAPHORE
] = 0x150,
306 [BRCMNAND_ID
] = 0x194,
307 [BRCMNAND_ID_EXT
] = 0x198,
308 [BRCMNAND_LL_RDATA
] = 0x19c,
309 [BRCMNAND_OOB_READ_BASE
] = 0x200,
310 [BRCMNAND_OOB_READ_10_BASE
] = 0,
311 [BRCMNAND_OOB_WRITE_BASE
] = 0x280,
312 [BRCMNAND_OOB_WRITE_10_BASE
] = 0,
313 [BRCMNAND_FC_BASE
] = 0x400,
317 static const u16 brcmnand_regs_v71
[] = {
318 [BRCMNAND_CMD_START
] = 0x04,
319 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
320 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
321 [BRCMNAND_INTFC_STATUS
] = 0x14,
322 [BRCMNAND_CS_SELECT
] = 0x18,
323 [BRCMNAND_CS_XOR
] = 0x1c,
324 [BRCMNAND_LL_OP
] = 0x20,
325 [BRCMNAND_CS0_BASE
] = 0x50,
326 [BRCMNAND_CS1_BASE
] = 0,
327 [BRCMNAND_CORR_THRESHOLD
] = 0xdc,
328 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0xe0,
329 [BRCMNAND_UNCORR_COUNT
] = 0xfc,
330 [BRCMNAND_CORR_COUNT
] = 0x100,
331 [BRCMNAND_CORR_EXT_ADDR
] = 0x10c,
332 [BRCMNAND_CORR_ADDR
] = 0x110,
333 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x114,
334 [BRCMNAND_UNCORR_ADDR
] = 0x118,
335 [BRCMNAND_SEMAPHORE
] = 0x150,
336 [BRCMNAND_ID
] = 0x194,
337 [BRCMNAND_ID_EXT
] = 0x198,
338 [BRCMNAND_LL_RDATA
] = 0x19c,
339 [BRCMNAND_OOB_READ_BASE
] = 0x200,
340 [BRCMNAND_OOB_READ_10_BASE
] = 0,
341 [BRCMNAND_OOB_WRITE_BASE
] = 0x280,
342 [BRCMNAND_OOB_WRITE_10_BASE
] = 0,
343 [BRCMNAND_FC_BASE
] = 0x400,
347 static const u16 brcmnand_regs_v72
[] = {
348 [BRCMNAND_CMD_START
] = 0x04,
349 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
350 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
351 [BRCMNAND_INTFC_STATUS
] = 0x14,
352 [BRCMNAND_CS_SELECT
] = 0x18,
353 [BRCMNAND_CS_XOR
] = 0x1c,
354 [BRCMNAND_LL_OP
] = 0x20,
355 [BRCMNAND_CS0_BASE
] = 0x50,
356 [BRCMNAND_CS1_BASE
] = 0,
357 [BRCMNAND_CORR_THRESHOLD
] = 0xdc,
358 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0xe0,
359 [BRCMNAND_UNCORR_COUNT
] = 0xfc,
360 [BRCMNAND_CORR_COUNT
] = 0x100,
361 [BRCMNAND_CORR_EXT_ADDR
] = 0x10c,
362 [BRCMNAND_CORR_ADDR
] = 0x110,
363 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x114,
364 [BRCMNAND_UNCORR_ADDR
] = 0x118,
365 [BRCMNAND_SEMAPHORE
] = 0x150,
366 [BRCMNAND_ID
] = 0x194,
367 [BRCMNAND_ID_EXT
] = 0x198,
368 [BRCMNAND_LL_RDATA
] = 0x19c,
369 [BRCMNAND_OOB_READ_BASE
] = 0x200,
370 [BRCMNAND_OOB_READ_10_BASE
] = 0,
371 [BRCMNAND_OOB_WRITE_BASE
] = 0x400,
372 [BRCMNAND_OOB_WRITE_10_BASE
] = 0,
373 [BRCMNAND_FC_BASE
] = 0x600,
376 enum brcmnand_cs_reg
{
377 BRCMNAND_CS_CFG_EXT
= 0,
379 BRCMNAND_CS_ACC_CONTROL
,
384 /* Per chip-select offsets for v7.1 */
385 static const u8 brcmnand_cs_offsets_v71
[] = {
386 [BRCMNAND_CS_ACC_CONTROL
] = 0x00,
387 [BRCMNAND_CS_CFG_EXT
] = 0x04,
388 [BRCMNAND_CS_CFG
] = 0x08,
389 [BRCMNAND_CS_TIMING1
] = 0x0c,
390 [BRCMNAND_CS_TIMING2
] = 0x10,
393 /* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
394 static const u8 brcmnand_cs_offsets
[] = {
395 [BRCMNAND_CS_ACC_CONTROL
] = 0x00,
396 [BRCMNAND_CS_CFG_EXT
] = 0x04,
397 [BRCMNAND_CS_CFG
] = 0x04,
398 [BRCMNAND_CS_TIMING1
] = 0x08,
399 [BRCMNAND_CS_TIMING2
] = 0x0c,
402 /* Per chip-select offset for <= v5.0 on CS0 only */
403 static const u8 brcmnand_cs_offsets_cs0
[] = {
404 [BRCMNAND_CS_ACC_CONTROL
] = 0x00,
405 [BRCMNAND_CS_CFG_EXT
] = 0x08,
406 [BRCMNAND_CS_CFG
] = 0x08,
407 [BRCMNAND_CS_TIMING1
] = 0x10,
408 [BRCMNAND_CS_TIMING2
] = 0x14,
412 * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
413 * one config register, but once the bitfields overflowed, newer controllers
414 * (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around.
417 CFG_BLK_ADR_BYTES_SHIFT
= 8,
418 CFG_COL_ADR_BYTES_SHIFT
= 12,
419 CFG_FUL_ADR_BYTES_SHIFT
= 16,
420 CFG_BUS_WIDTH_SHIFT
= 23,
421 CFG_BUS_WIDTH
= BIT(CFG_BUS_WIDTH_SHIFT
),
422 CFG_DEVICE_SIZE_SHIFT
= 24,
424 /* Only for pre-v7.1 (with no CFG_EXT register) */
425 CFG_PAGE_SIZE_SHIFT
= 20,
426 CFG_BLK_SIZE_SHIFT
= 28,
428 /* Only for v7.1+ (with CFG_EXT register) */
429 CFG_EXT_PAGE_SIZE_SHIFT
= 0,
430 CFG_EXT_BLK_SIZE_SHIFT
= 4,
433 /* BRCMNAND_INTFC_STATUS */
435 INTFC_FLASH_STATUS
= GENMASK(7, 0),
437 INTFC_ERASED
= BIT(27),
438 INTFC_OOB_VALID
= BIT(28),
439 INTFC_CACHE_VALID
= BIT(29),
440 INTFC_FLASH_READY
= BIT(30),
441 INTFC_CTLR_READY
= BIT(31),
444 static inline u32
nand_readreg(struct brcmnand_controller
*ctrl
, u32 offs
)
446 return brcmnand_readl(ctrl
->nand_base
+ offs
);
449 static inline void nand_writereg(struct brcmnand_controller
*ctrl
, u32 offs
,
452 brcmnand_writel(val
, ctrl
->nand_base
+ offs
);
455 static int brcmnand_revision_init(struct brcmnand_controller
*ctrl
)
457 static const unsigned int block_sizes_v6
[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
458 static const unsigned int block_sizes_v4
[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
459 static const unsigned int page_sizes
[] = { 512, 2048, 4096, 8192, 0 };
461 ctrl
->nand_version
= nand_readreg(ctrl
, 0) & 0xffff;
463 /* Only support v4.0+? */
464 if (ctrl
->nand_version
< 0x0400) {
465 dev_err(ctrl
->dev
, "version %#x not supported\n",
470 /* Register offsets */
471 if (ctrl
->nand_version
>= 0x0702)
472 ctrl
->reg_offsets
= brcmnand_regs_v72
;
473 else if (ctrl
->nand_version
>= 0x0701)
474 ctrl
->reg_offsets
= brcmnand_regs_v71
;
475 else if (ctrl
->nand_version
>= 0x0600)
476 ctrl
->reg_offsets
= brcmnand_regs_v60
;
477 else if (ctrl
->nand_version
>= 0x0500)
478 ctrl
->reg_offsets
= brcmnand_regs_v50
;
479 else if (ctrl
->nand_version
>= 0x0400)
480 ctrl
->reg_offsets
= brcmnand_regs_v40
;
482 /* Chip-select stride */
483 if (ctrl
->nand_version
>= 0x0701)
484 ctrl
->reg_spacing
= 0x14;
486 ctrl
->reg_spacing
= 0x10;
488 /* Per chip-select registers */
489 if (ctrl
->nand_version
>= 0x0701) {
490 ctrl
->cs_offsets
= brcmnand_cs_offsets_v71
;
492 ctrl
->cs_offsets
= brcmnand_cs_offsets
;
494 /* v5.0 and earlier has a different CS0 offset layout */
495 if (ctrl
->nand_version
<= 0x0500)
496 ctrl
->cs0_offsets
= brcmnand_cs_offsets_cs0
;
499 /* Page / block sizes */
500 if (ctrl
->nand_version
>= 0x0701) {
501 /* >= v7.1 use nice power-of-2 values! */
502 ctrl
->max_page_size
= 16 * 1024;
503 ctrl
->max_block_size
= 2 * 1024 * 1024;
505 ctrl
->page_sizes
= page_sizes
;
506 if (ctrl
->nand_version
>= 0x0600)
507 ctrl
->block_sizes
= block_sizes_v6
;
509 ctrl
->block_sizes
= block_sizes_v4
;
511 if (ctrl
->nand_version
< 0x0400) {
512 ctrl
->max_page_size
= 4096;
513 ctrl
->max_block_size
= 512 * 1024;
517 /* Maximum spare area sector size (per 512B) */
518 if (ctrl
->nand_version
>= 0x0702)
520 else if (ctrl
->nand_version
>= 0x0600)
522 else if (ctrl
->nand_version
>= 0x0500)
527 /* v6.0 and newer (except v6.1) have prefetch support */
528 if (ctrl
->nand_version
>= 0x0600 && ctrl
->nand_version
!= 0x0601)
529 ctrl
->features
|= BRCMNAND_HAS_PREFETCH
;
532 * v6.x has cache mode, but it's implemented differently. Ignore it for
535 if (ctrl
->nand_version
>= 0x0700)
536 ctrl
->features
|= BRCMNAND_HAS_CACHE_MODE
;
538 if (ctrl
->nand_version
>= 0x0500)
539 ctrl
->features
|= BRCMNAND_HAS_1K_SECTORS
;
541 if (ctrl
->nand_version
>= 0x0700)
542 ctrl
->features
|= BRCMNAND_HAS_WP
;
543 else if (of_property_read_bool(ctrl
->dev
->of_node
, "brcm,nand-has-wp"))
544 ctrl
->features
|= BRCMNAND_HAS_WP
;
549 static inline u32
brcmnand_read_reg(struct brcmnand_controller
*ctrl
,
550 enum brcmnand_reg reg
)
552 u16 offs
= ctrl
->reg_offsets
[reg
];
555 return nand_readreg(ctrl
, offs
);
560 static inline void brcmnand_write_reg(struct brcmnand_controller
*ctrl
,
561 enum brcmnand_reg reg
, u32 val
)
563 u16 offs
= ctrl
->reg_offsets
[reg
];
566 nand_writereg(ctrl
, offs
, val
);
569 static inline void brcmnand_rmw_reg(struct brcmnand_controller
*ctrl
,
570 enum brcmnand_reg reg
, u32 mask
, unsigned
573 u32 tmp
= brcmnand_read_reg(ctrl
, reg
);
577 brcmnand_write_reg(ctrl
, reg
, tmp
);
580 static inline u32
brcmnand_read_fc(struct brcmnand_controller
*ctrl
, int word
)
582 return __raw_readl(ctrl
->nand_fc
+ word
* 4);
585 static inline void brcmnand_write_fc(struct brcmnand_controller
*ctrl
,
588 __raw_writel(val
, ctrl
->nand_fc
+ word
* 4);
591 static inline u16
brcmnand_cs_offset(struct brcmnand_controller
*ctrl
, int cs
,
592 enum brcmnand_cs_reg reg
)
594 u16 offs_cs0
= ctrl
->reg_offsets
[BRCMNAND_CS0_BASE
];
595 u16 offs_cs1
= ctrl
->reg_offsets
[BRCMNAND_CS1_BASE
];
598 if (cs
== 0 && ctrl
->cs0_offsets
)
599 cs_offs
= ctrl
->cs0_offsets
[reg
];
601 cs_offs
= ctrl
->cs_offsets
[reg
];
604 return offs_cs1
+ (cs
- 1) * ctrl
->reg_spacing
+ cs_offs
;
606 return offs_cs0
+ cs
* ctrl
->reg_spacing
+ cs_offs
;
609 static inline u32
brcmnand_count_corrected(struct brcmnand_controller
*ctrl
)
611 if (ctrl
->nand_version
< 0x0600)
613 return brcmnand_read_reg(ctrl
, BRCMNAND_CORR_COUNT
);
616 static void brcmnand_wr_corr_thresh(struct brcmnand_host
*host
, u8 val
)
618 struct brcmnand_controller
*ctrl
= host
->ctrl
;
619 unsigned int shift
= 0, bits
;
620 enum brcmnand_reg reg
= BRCMNAND_CORR_THRESHOLD
;
623 if (ctrl
->nand_version
>= 0x0702)
625 else if (ctrl
->nand_version
>= 0x0600)
627 else if (ctrl
->nand_version
>= 0x0500)
632 if (ctrl
->nand_version
>= 0x0702) {
634 reg
= BRCMNAND_CORR_THRESHOLD_EXT
;
635 shift
= (cs
% 4) * bits
;
636 } else if (ctrl
->nand_version
>= 0x0600) {
638 reg
= BRCMNAND_CORR_THRESHOLD_EXT
;
639 shift
= (cs
% 5) * bits
;
641 brcmnand_rmw_reg(ctrl
, reg
, (bits
- 1) << shift
, shift
, val
);
644 static inline int brcmnand_cmd_shift(struct brcmnand_controller
*ctrl
)
646 if (ctrl
->nand_version
< 0x0602)
651 /***********************************************************************
652 * NAND ACC CONTROL bitfield
654 * Some bits have remained constant throughout hardware revision, while
655 * others have shifted around.
656 ***********************************************************************/
658 /* Constant for all versions (where supported) */
660 /* See BRCMNAND_HAS_CACHE_MODE */
661 ACC_CONTROL_CACHE_MODE
= BIT(22),
663 /* See BRCMNAND_HAS_PREFETCH */
664 ACC_CONTROL_PREFETCH
= BIT(23),
666 ACC_CONTROL_PAGE_HIT
= BIT(24),
667 ACC_CONTROL_WR_PREEMPT
= BIT(25),
668 ACC_CONTROL_PARTIAL_PAGE
= BIT(26),
669 ACC_CONTROL_RD_ERASED
= BIT(27),
670 ACC_CONTROL_FAST_PGM_RDIN
= BIT(28),
671 ACC_CONTROL_WR_ECC
= BIT(30),
672 ACC_CONTROL_RD_ECC
= BIT(31),
675 static inline u32
brcmnand_spare_area_mask(struct brcmnand_controller
*ctrl
)
677 if (ctrl
->nand_version
>= 0x0702)
678 return GENMASK(7, 0);
679 else if (ctrl
->nand_version
>= 0x0600)
680 return GENMASK(6, 0);
682 return GENMASK(5, 0);
685 #define NAND_ACC_CONTROL_ECC_SHIFT 16
686 #define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13
688 static inline u32
brcmnand_ecc_level_mask(struct brcmnand_controller
*ctrl
)
690 u32 mask
= (ctrl
->nand_version
>= 0x0600) ? 0x1f : 0x0f;
692 mask
<<= NAND_ACC_CONTROL_ECC_SHIFT
;
694 /* v7.2 includes additional ECC levels */
695 if (ctrl
->nand_version
>= 0x0702)
696 mask
|= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT
;
701 static void brcmnand_set_ecc_enabled(struct brcmnand_host
*host
, int en
)
703 struct brcmnand_controller
*ctrl
= host
->ctrl
;
704 u16 offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_ACC_CONTROL
);
705 u32 acc_control
= nand_readreg(ctrl
, offs
);
706 u32 ecc_flags
= ACC_CONTROL_WR_ECC
| ACC_CONTROL_RD_ECC
;
709 acc_control
|= ecc_flags
; /* enable RD/WR ECC */
710 acc_control
|= host
->hwcfg
.ecc_level
711 << NAND_ACC_CONTROL_ECC_SHIFT
;
713 acc_control
&= ~ecc_flags
; /* disable RD/WR ECC */
714 acc_control
&= ~brcmnand_ecc_level_mask(ctrl
);
717 nand_writereg(ctrl
, offs
, acc_control
);
720 static inline int brcmnand_sector_1k_shift(struct brcmnand_controller
*ctrl
)
722 if (ctrl
->nand_version
>= 0x0702)
724 else if (ctrl
->nand_version
>= 0x0600)
726 else if (ctrl
->nand_version
>= 0x0500)
732 static int brcmnand_get_sector_size_1k(struct brcmnand_host
*host
)
734 struct brcmnand_controller
*ctrl
= host
->ctrl
;
735 int shift
= brcmnand_sector_1k_shift(ctrl
);
736 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
737 BRCMNAND_CS_ACC_CONTROL
);
742 return (nand_readreg(ctrl
, acc_control_offs
) >> shift
) & 0x1;
745 static void brcmnand_set_sector_size_1k(struct brcmnand_host
*host
, int val
)
747 struct brcmnand_controller
*ctrl
= host
->ctrl
;
748 int shift
= brcmnand_sector_1k_shift(ctrl
);
749 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
750 BRCMNAND_CS_ACC_CONTROL
);
756 tmp
= nand_readreg(ctrl
, acc_control_offs
);
757 tmp
&= ~(1 << shift
);
758 tmp
|= (!!val
) << shift
;
759 nand_writereg(ctrl
, acc_control_offs
, tmp
);
762 /***********************************************************************
764 ***********************************************************************/
767 CS_SELECT_NAND_WP
= BIT(29),
768 CS_SELECT_AUTO_DEVICE_ID_CFG
= BIT(30),
771 static int bcmnand_ctrl_poll_status(struct brcmnand_controller
*ctrl
,
772 u32 mask
, u32 expected_val
,
773 unsigned long timeout_ms
)
779 timeout_ms
= NAND_POLL_STATUS_TIMEOUT_MS
;
781 limit
= jiffies
+ msecs_to_jiffies(timeout_ms
);
783 val
= brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
);
784 if ((val
& mask
) == expected_val
)
788 } while (time_after(limit
, jiffies
));
790 dev_warn(ctrl
->dev
, "timeout on status poll (expected %x got %x)\n",
791 expected_val
, val
& mask
);
796 static inline void brcmnand_set_wp(struct brcmnand_controller
*ctrl
, bool en
)
798 u32 val
= en
? CS_SELECT_NAND_WP
: 0;
800 brcmnand_rmw_reg(ctrl
, BRCMNAND_CS_SELECT
, CS_SELECT_NAND_WP
, 0, val
);
803 /***********************************************************************
805 ***********************************************************************/
808 FLASH_DMA_REVISION
= 0x00,
809 FLASH_DMA_FIRST_DESC
= 0x04,
810 FLASH_DMA_FIRST_DESC_EXT
= 0x08,
811 FLASH_DMA_CTRL
= 0x0c,
812 FLASH_DMA_MODE
= 0x10,
813 FLASH_DMA_STATUS
= 0x14,
814 FLASH_DMA_INTERRUPT_DESC
= 0x18,
815 FLASH_DMA_INTERRUPT_DESC_EXT
= 0x1c,
816 FLASH_DMA_ERROR_STATUS
= 0x20,
817 FLASH_DMA_CURRENT_DESC
= 0x24,
818 FLASH_DMA_CURRENT_DESC_EXT
= 0x28,
821 static inline bool has_flash_dma(struct brcmnand_controller
*ctrl
)
823 return ctrl
->flash_dma_base
;
826 static inline bool flash_dma_buf_ok(const void *buf
)
828 return buf
&& !is_vmalloc_addr(buf
) &&
829 likely(IS_ALIGNED((uintptr_t)buf
, 4));
832 static inline void flash_dma_writel(struct brcmnand_controller
*ctrl
, u8 offs
,
835 brcmnand_writel(val
, ctrl
->flash_dma_base
+ offs
);
838 static inline u32
flash_dma_readl(struct brcmnand_controller
*ctrl
, u8 offs
)
840 return brcmnand_readl(ctrl
->flash_dma_base
+ offs
);
843 /* Low-level operation types: command, address, write, or read */
844 enum brcmnand_llop_type
{
851 /***********************************************************************
852 * Internal support functions
853 ***********************************************************************/
855 static inline bool is_hamming_ecc(struct brcmnand_controller
*ctrl
,
856 struct brcmnand_cfg
*cfg
)
858 if (ctrl
->nand_version
<= 0x0701)
859 return cfg
->sector_size_1k
== 0 && cfg
->spare_area_size
== 16 &&
860 cfg
->ecc_level
== 15;
862 return cfg
->sector_size_1k
== 0 && ((cfg
->spare_area_size
== 16 &&
863 cfg
->ecc_level
== 15) ||
864 (cfg
->spare_area_size
== 28 && cfg
->ecc_level
== 16));
868 * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given
869 * the layout/configuration.
870 * Returns -ERRCODE on failure.
872 static int brcmnand_hamming_ooblayout_ecc(struct mtd_info
*mtd
, int section
,
873 struct mtd_oob_region
*oobregion
)
875 struct nand_chip
*chip
= mtd_to_nand(mtd
);
876 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
877 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
878 int sas
= cfg
->spare_area_size
<< cfg
->sector_size_1k
;
879 int sectors
= cfg
->page_size
/ (512 << cfg
->sector_size_1k
);
881 if (section
>= sectors
)
884 oobregion
->offset
= (section
* sas
) + 6;
885 oobregion
->length
= 3;
890 static int brcmnand_hamming_ooblayout_free(struct mtd_info
*mtd
, int section
,
891 struct mtd_oob_region
*oobregion
)
893 struct nand_chip
*chip
= mtd_to_nand(mtd
);
894 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
895 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
896 int sas
= cfg
->spare_area_size
<< cfg
->sector_size_1k
;
897 int sectors
= cfg
->page_size
/ (512 << cfg
->sector_size_1k
);
899 if (section
>= sectors
* 2)
902 oobregion
->offset
= (section
/ 2) * sas
;
905 oobregion
->offset
+= 9;
906 oobregion
->length
= 7;
908 oobregion
->length
= 6;
910 /* First sector of each page may have BBI */
913 * Small-page NAND use byte 6 for BBI while large-page
916 if (cfg
->page_size
> 512)
925 static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops
= {
926 .ecc
= brcmnand_hamming_ooblayout_ecc
,
927 .free
= brcmnand_hamming_ooblayout_free
,
930 static int brcmnand_bch_ooblayout_ecc(struct mtd_info
*mtd
, int section
,
931 struct mtd_oob_region
*oobregion
)
933 struct nand_chip
*chip
= mtd_to_nand(mtd
);
934 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
935 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
936 int sas
= cfg
->spare_area_size
<< cfg
->sector_size_1k
;
937 int sectors
= cfg
->page_size
/ (512 << cfg
->sector_size_1k
);
939 if (section
>= sectors
)
942 oobregion
->offset
= (section
* (sas
+ 1)) - chip
->ecc
.bytes
;
943 oobregion
->length
= chip
->ecc
.bytes
;
948 static int brcmnand_bch_ooblayout_free_lp(struct mtd_info
*mtd
, int section
,
949 struct mtd_oob_region
*oobregion
)
951 struct nand_chip
*chip
= mtd_to_nand(mtd
);
952 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
953 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
954 int sas
= cfg
->spare_area_size
<< cfg
->sector_size_1k
;
955 int sectors
= cfg
->page_size
/ (512 << cfg
->sector_size_1k
);
957 if (section
>= sectors
)
960 if (sas
<= chip
->ecc
.bytes
)
963 oobregion
->offset
= section
* sas
;
964 oobregion
->length
= sas
- chip
->ecc
.bytes
;
974 static int brcmnand_bch_ooblayout_free_sp(struct mtd_info
*mtd
, int section
,
975 struct mtd_oob_region
*oobregion
)
977 struct nand_chip
*chip
= mtd_to_nand(mtd
);
978 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
979 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
980 int sas
= cfg
->spare_area_size
<< cfg
->sector_size_1k
;
982 if (section
> 1 || sas
- chip
->ecc
.bytes
< 6 ||
983 (section
&& sas
- chip
->ecc
.bytes
== 6))
987 oobregion
->offset
= 0;
988 oobregion
->length
= 5;
990 oobregion
->offset
= 6;
991 oobregion
->length
= sas
- chip
->ecc
.bytes
- 6;
997 static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops
= {
998 .ecc
= brcmnand_bch_ooblayout_ecc
,
999 .free
= brcmnand_bch_ooblayout_free_lp
,
1002 static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops
= {
1003 .ecc
= brcmnand_bch_ooblayout_ecc
,
1004 .free
= brcmnand_bch_ooblayout_free_sp
,
1007 static int brcmstb_choose_ecc_layout(struct brcmnand_host
*host
)
1009 struct brcmnand_cfg
*p
= &host
->hwcfg
;
1010 struct mtd_info
*mtd
= nand_to_mtd(&host
->chip
);
1011 struct nand_ecc_ctrl
*ecc
= &host
->chip
.ecc
;
1012 unsigned int ecc_level
= p
->ecc_level
;
1013 int sas
= p
->spare_area_size
<< p
->sector_size_1k
;
1014 int sectors
= p
->page_size
/ (512 << p
->sector_size_1k
);
1016 if (p
->sector_size_1k
)
1019 if (is_hamming_ecc(host
->ctrl
, p
)) {
1020 ecc
->bytes
= 3 * sectors
;
1021 mtd_set_ooblayout(mtd
, &brcmnand_hamming_ooblayout_ops
);
1026 * CONTROLLER_VERSION:
1027 * < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
1028 * >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
1029 * But we will just be conservative.
1031 ecc
->bytes
= DIV_ROUND_UP(ecc_level
* 14, 8);
1032 if (p
->page_size
== 512)
1033 mtd_set_ooblayout(mtd
, &brcmnand_bch_sp_ooblayout_ops
);
1035 mtd_set_ooblayout(mtd
, &brcmnand_bch_lp_ooblayout_ops
);
1037 if (ecc
->bytes
>= sas
) {
1038 dev_err(&host
->pdev
->dev
,
1039 "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
1047 static void brcmnand_wp(struct mtd_info
*mtd
, int wp
)
1049 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1050 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1051 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1053 if ((ctrl
->features
& BRCMNAND_HAS_WP
) && wp_on
== 1) {
1054 static int old_wp
= -1;
1058 dev_dbg(ctrl
->dev
, "WP %s\n", wp
? "on" : "off");
1063 * make sure ctrl/flash ready before and after
1064 * changing state of #WP pin
1066 ret
= bcmnand_ctrl_poll_status(ctrl
, NAND_CTRL_RDY
|
1069 NAND_STATUS_READY
, 0);
1073 brcmnand_set_wp(ctrl
, wp
);
1074 chip
->cmdfunc(mtd
, NAND_CMD_STATUS
, -1, -1);
1075 /* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
1076 ret
= bcmnand_ctrl_poll_status(ctrl
,
1082 (wp
? 0 : NAND_STATUS_WP
), 0);
1085 dev_err_ratelimited(&host
->pdev
->dev
,
1086 "nand #WP expected %s\n",
1091 /* Helper functions for reading and writing OOB registers */
1092 static inline u8
oob_reg_read(struct brcmnand_controller
*ctrl
, u32 offs
)
1094 u16 offset0
, offset10
, reg_offs
;
1096 offset0
= ctrl
->reg_offsets
[BRCMNAND_OOB_READ_BASE
];
1097 offset10
= ctrl
->reg_offsets
[BRCMNAND_OOB_READ_10_BASE
];
1099 if (offs
>= ctrl
->max_oob
)
1102 if (offs
>= 16 && offset10
)
1103 reg_offs
= offset10
+ ((offs
- 0x10) & ~0x03);
1105 reg_offs
= offset0
+ (offs
& ~0x03);
1107 return nand_readreg(ctrl
, reg_offs
) >> (24 - ((offs
& 0x03) << 3));
1110 static inline void oob_reg_write(struct brcmnand_controller
*ctrl
, u32 offs
,
1113 u16 offset0
, offset10
, reg_offs
;
1115 offset0
= ctrl
->reg_offsets
[BRCMNAND_OOB_WRITE_BASE
];
1116 offset10
= ctrl
->reg_offsets
[BRCMNAND_OOB_WRITE_10_BASE
];
1118 if (offs
>= ctrl
->max_oob
)
1121 if (offs
>= 16 && offset10
)
1122 reg_offs
= offset10
+ ((offs
- 0x10) & ~0x03);
1124 reg_offs
= offset0
+ (offs
& ~0x03);
1126 nand_writereg(ctrl
, reg_offs
, data
);
1130 * read_oob_from_regs - read data from OOB registers
1131 * @ctrl: NAND controller
1132 * @i: sub-page sector index
1133 * @oob: buffer to read to
1134 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
1135 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
1137 static int read_oob_from_regs(struct brcmnand_controller
*ctrl
, int i
, u8
*oob
,
1138 int sas
, int sector_1k
)
1140 int tbytes
= sas
<< sector_1k
;
1143 /* Adjust OOB values for 1K sector size */
1144 if (sector_1k
&& (i
& 0x01))
1145 tbytes
= max(0, tbytes
- (int)ctrl
->max_oob
);
1146 tbytes
= min_t(int, tbytes
, ctrl
->max_oob
);
1148 for (j
= 0; j
< tbytes
; j
++)
1149 oob
[j
] = oob_reg_read(ctrl
, j
);
1154 * write_oob_to_regs - write data to OOB registers
1155 * @i: sub-page sector index
1156 * @oob: buffer to write from
1157 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
1158 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
1160 static int write_oob_to_regs(struct brcmnand_controller
*ctrl
, int i
,
1161 const u8
*oob
, int sas
, int sector_1k
)
1163 int tbytes
= sas
<< sector_1k
;
1166 /* Adjust OOB values for 1K sector size */
1167 if (sector_1k
&& (i
& 0x01))
1168 tbytes
= max(0, tbytes
- (int)ctrl
->max_oob
);
1169 tbytes
= min_t(int, tbytes
, ctrl
->max_oob
);
1171 for (j
= 0; j
< tbytes
; j
+= 4)
1172 oob_reg_write(ctrl
, j
,
1173 (oob
[j
+ 0] << 24) |
1174 (oob
[j
+ 1] << 16) |
1180 static irqreturn_t
brcmnand_ctlrdy_irq(int irq
, void *data
)
1182 struct brcmnand_controller
*ctrl
= data
;
1184 /* Discard all NAND_CTLRDY interrupts during DMA */
1185 if (ctrl
->dma_pending
)
1188 complete(&ctrl
->done
);
1192 /* Handle SoC-specific interrupt hardware */
1193 static irqreturn_t
brcmnand_irq(int irq
, void *data
)
1195 struct brcmnand_controller
*ctrl
= data
;
1197 if (ctrl
->soc
->ctlrdy_ack(ctrl
->soc
))
1198 return brcmnand_ctlrdy_irq(irq
, data
);
1203 static irqreturn_t
brcmnand_dma_irq(int irq
, void *data
)
1205 struct brcmnand_controller
*ctrl
= data
;
1207 complete(&ctrl
->dma_done
);
1212 static void brcmnand_send_cmd(struct brcmnand_host
*host
, int cmd
)
1214 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1217 dev_dbg(ctrl
->dev
, "send native cmd %d addr_lo 0x%x\n", cmd
,
1218 brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
));
1219 BUG_ON(ctrl
->cmd_pending
!= 0);
1220 ctrl
->cmd_pending
= cmd
;
1222 ret
= bcmnand_ctrl_poll_status(ctrl
, NAND_CTRL_RDY
, NAND_CTRL_RDY
, 0);
1225 mb(); /* flush previous writes */
1226 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_START
,
1227 cmd
<< brcmnand_cmd_shift(ctrl
));
1230 /***********************************************************************
1231 * NAND MTD API: read/program/erase
1232 ***********************************************************************/
1234 static void brcmnand_cmd_ctrl(struct mtd_info
*mtd
, int dat
,
1237 /* intentionally left blank */
1240 static int brcmnand_waitfunc(struct mtd_info
*mtd
, struct nand_chip
*this)
1242 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1243 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1244 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1245 unsigned long timeo
= msecs_to_jiffies(100);
1247 dev_dbg(ctrl
->dev
, "wait on native cmd %d\n", ctrl
->cmd_pending
);
1248 if (ctrl
->cmd_pending
&&
1249 wait_for_completion_timeout(&ctrl
->done
, timeo
) <= 0) {
1250 u32 cmd
= brcmnand_read_reg(ctrl
, BRCMNAND_CMD_START
)
1251 >> brcmnand_cmd_shift(ctrl
);
1253 dev_err_ratelimited(ctrl
->dev
,
1254 "timeout waiting for command %#02x\n", cmd
);
1255 dev_err_ratelimited(ctrl
->dev
, "intfc status %08x\n",
1256 brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
));
1258 ctrl
->cmd_pending
= 0;
1259 return brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
) &
1268 LLOP_RETURN_IDLE
= BIT(31),
1270 LLOP_DATA_MASK
= GENMASK(15, 0),
1273 static int brcmnand_low_level_op(struct brcmnand_host
*host
,
1274 enum brcmnand_llop_type type
, u32 data
,
1277 struct mtd_info
*mtd
= nand_to_mtd(&host
->chip
);
1278 struct nand_chip
*chip
= &host
->chip
;
1279 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1282 tmp
= data
& LLOP_DATA_MASK
;
1285 tmp
|= LLOP_WE
| LLOP_CLE
;
1289 tmp
|= LLOP_WE
| LLOP_ALE
;
1302 tmp
|= LLOP_RETURN_IDLE
;
1304 dev_dbg(ctrl
->dev
, "ll_op cmd %#x\n", tmp
);
1306 brcmnand_write_reg(ctrl
, BRCMNAND_LL_OP
, tmp
);
1307 (void)brcmnand_read_reg(ctrl
, BRCMNAND_LL_OP
);
1309 brcmnand_send_cmd(host
, CMD_LOW_LEVEL_OP
);
1310 return brcmnand_waitfunc(mtd
, chip
);
1313 static void brcmnand_cmdfunc(struct mtd_info
*mtd
, unsigned command
,
1314 int column
, int page_addr
)
1316 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1317 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1318 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1319 u64 addr
= (u64
)page_addr
<< chip
->page_shift
;
1322 if (command
== NAND_CMD_READID
|| command
== NAND_CMD_PARAM
||
1323 command
== NAND_CMD_RNDOUT
)
1325 /* Avoid propagating a negative, don't-care address */
1326 else if (page_addr
< 0)
1329 dev_dbg(ctrl
->dev
, "cmd 0x%x addr 0x%llx\n", command
,
1330 (unsigned long long)addr
);
1332 host
->last_cmd
= command
;
1333 host
->last_byte
= 0;
1334 host
->last_addr
= addr
;
1337 case NAND_CMD_RESET
:
1338 native_cmd
= CMD_FLASH_RESET
;
1340 case NAND_CMD_STATUS
:
1341 native_cmd
= CMD_STATUS_READ
;
1343 case NAND_CMD_READID
:
1344 native_cmd
= CMD_DEVICE_ID_READ
;
1346 case NAND_CMD_READOOB
:
1347 native_cmd
= CMD_SPARE_AREA_READ
;
1349 case NAND_CMD_ERASE1
:
1350 native_cmd
= CMD_BLOCK_ERASE
;
1351 brcmnand_wp(mtd
, 0);
1353 case NAND_CMD_PARAM
:
1354 native_cmd
= CMD_PARAMETER_READ
;
1356 case NAND_CMD_SET_FEATURES
:
1357 case NAND_CMD_GET_FEATURES
:
1358 brcmnand_low_level_op(host
, LL_OP_CMD
, command
, false);
1359 brcmnand_low_level_op(host
, LL_OP_ADDR
, column
, false);
1361 case NAND_CMD_RNDOUT
:
1362 native_cmd
= CMD_PARAMETER_CHANGE_COL
;
1363 addr
&= ~((u64
)(FC_BYTES
- 1));
1365 * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
1366 * NB: hwcfg.sector_size_1k may not be initialized yet
1368 if (brcmnand_get_sector_size_1k(host
)) {
1369 host
->hwcfg
.sector_size_1k
=
1370 brcmnand_get_sector_size_1k(host
);
1371 brcmnand_set_sector_size_1k(host
, 0);
1379 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
,
1380 (host
->cs
<< 16) | ((addr
>> 32) & 0xffff));
1381 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
);
1382 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_ADDRESS
, lower_32_bits(addr
));
1383 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
);
1385 brcmnand_send_cmd(host
, native_cmd
);
1386 brcmnand_waitfunc(mtd
, chip
);
1388 if (native_cmd
== CMD_PARAMETER_READ
||
1389 native_cmd
== CMD_PARAMETER_CHANGE_COL
) {
1390 /* Copy flash cache word-wise */
1391 u32
*flash_cache
= (u32
*)ctrl
->flash_cache
;
1394 brcmnand_soc_data_bus_prepare(ctrl
->soc
, true);
1397 * Must cache the FLASH_CACHE now, since changes in
1398 * SECTOR_SIZE_1K may invalidate it
1400 for (i
= 0; i
< FC_WORDS
; i
++)
1402 * Flash cache is big endian for parameter pages, at
1405 flash_cache
[i
] = be32_to_cpu(brcmnand_read_fc(ctrl
, i
));
1407 brcmnand_soc_data_bus_unprepare(ctrl
->soc
, true);
1409 /* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
1410 if (host
->hwcfg
.sector_size_1k
)
1411 brcmnand_set_sector_size_1k(host
,
1412 host
->hwcfg
.sector_size_1k
);
1415 /* Re-enable protection is necessary only after erase */
1416 if (command
== NAND_CMD_ERASE1
)
1417 brcmnand_wp(mtd
, 1);
1420 static uint8_t brcmnand_read_byte(struct mtd_info
*mtd
)
1422 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1423 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1424 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1428 switch (host
->last_cmd
) {
1429 case NAND_CMD_READID
:
1430 if (host
->last_byte
< 4)
1431 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_ID
) >>
1432 (24 - (host
->last_byte
<< 3));
1433 else if (host
->last_byte
< 8)
1434 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_ID_EXT
) >>
1435 (56 - (host
->last_byte
<< 3));
1438 case NAND_CMD_READOOB
:
1439 ret
= oob_reg_read(ctrl
, host
->last_byte
);
1442 case NAND_CMD_STATUS
:
1443 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
) &
1445 if (wp_on
) /* hide WP status */
1446 ret
|= NAND_STATUS_WP
;
1449 case NAND_CMD_PARAM
:
1450 case NAND_CMD_RNDOUT
:
1451 addr
= host
->last_addr
+ host
->last_byte
;
1452 offs
= addr
& (FC_BYTES
- 1);
1454 /* At FC_BYTES boundary, switch to next column */
1455 if (host
->last_byte
> 0 && offs
== 0)
1456 chip
->cmdfunc(mtd
, NAND_CMD_RNDOUT
, addr
, -1);
1458 ret
= ctrl
->flash_cache
[offs
];
1460 case NAND_CMD_GET_FEATURES
:
1461 if (host
->last_byte
>= ONFI_SUBFEATURE_PARAM_LEN
) {
1464 bool last
= host
->last_byte
==
1465 ONFI_SUBFEATURE_PARAM_LEN
- 1;
1466 brcmnand_low_level_op(host
, LL_OP_RD
, 0, last
);
1467 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_LL_RDATA
) & 0xff;
1471 dev_dbg(ctrl
->dev
, "read byte = 0x%02x\n", ret
);
1477 static void brcmnand_read_buf(struct mtd_info
*mtd
, uint8_t *buf
, int len
)
1481 for (i
= 0; i
< len
; i
++, buf
++)
1482 *buf
= brcmnand_read_byte(mtd
);
1485 static void brcmnand_write_buf(struct mtd_info
*mtd
, const uint8_t *buf
,
1489 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1490 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1492 switch (host
->last_cmd
) {
1493 case NAND_CMD_SET_FEATURES
:
1494 for (i
= 0; i
< len
; i
++)
1495 brcmnand_low_level_op(host
, LL_OP_WR
, buf
[i
],
1505 * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
1506 * following ahead of time:
1507 * - Is this descriptor the beginning or end of a linked list?
1508 * - What is the (DMA) address of the next descriptor in the linked list?
1510 static int brcmnand_fill_dma_desc(struct brcmnand_host
*host
,
1511 struct brcm_nand_dma_desc
*desc
, u64 addr
,
1512 dma_addr_t buf
, u32 len
, u8 dma_cmd
,
1513 bool begin
, bool end
,
1514 dma_addr_t next_desc
)
1516 memset(desc
, 0, sizeof(*desc
));
1517 /* Descriptors are written in native byte order (wordwise) */
1518 desc
->next_desc
= lower_32_bits(next_desc
);
1519 desc
->next_desc_ext
= upper_32_bits(next_desc
);
1520 desc
->cmd_irq
= (dma_cmd
<< 24) |
1521 (end
? (0x03 << 8) : 0) | /* IRQ | STOP */
1522 (!!begin
) | ((!!end
) << 1); /* head, tail */
1523 #ifdef CONFIG_CPU_BIG_ENDIAN
1524 desc
->cmd_irq
|= 0x01 << 12;
1526 desc
->dram_addr
= lower_32_bits(buf
);
1527 desc
->dram_addr_ext
= upper_32_bits(buf
);
1528 desc
->tfr_len
= len
;
1529 desc
->total_len
= len
;
1530 desc
->flash_addr
= lower_32_bits(addr
);
1531 desc
->flash_addr_ext
= upper_32_bits(addr
);
1532 desc
->cs
= host
->cs
;
1533 desc
->status_valid
= 0x01;
1538 * Kick the FLASH_DMA engine, with a given DMA descriptor
1540 static void brcmnand_dma_run(struct brcmnand_host
*host
, dma_addr_t desc
)
1542 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1543 unsigned long timeo
= msecs_to_jiffies(100);
1545 flash_dma_writel(ctrl
, FLASH_DMA_FIRST_DESC
, lower_32_bits(desc
));
1546 (void)flash_dma_readl(ctrl
, FLASH_DMA_FIRST_DESC
);
1547 flash_dma_writel(ctrl
, FLASH_DMA_FIRST_DESC_EXT
, upper_32_bits(desc
));
1548 (void)flash_dma_readl(ctrl
, FLASH_DMA_FIRST_DESC_EXT
);
1550 /* Start FLASH_DMA engine */
1551 ctrl
->dma_pending
= true;
1552 mb(); /* flush previous writes */
1553 flash_dma_writel(ctrl
, FLASH_DMA_CTRL
, 0x03); /* wake | run */
1555 if (wait_for_completion_timeout(&ctrl
->dma_done
, timeo
) <= 0) {
1557 "timeout waiting for DMA; status %#x, error status %#x\n",
1558 flash_dma_readl(ctrl
, FLASH_DMA_STATUS
),
1559 flash_dma_readl(ctrl
, FLASH_DMA_ERROR_STATUS
));
1561 ctrl
->dma_pending
= false;
1562 flash_dma_writel(ctrl
, FLASH_DMA_CTRL
, 0); /* force stop */
1565 static int brcmnand_dma_trans(struct brcmnand_host
*host
, u64 addr
, u32
*buf
,
1566 u32 len
, u8 dma_cmd
)
1568 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1570 int dir
= dma_cmd
== CMD_PAGE_READ
? DMA_FROM_DEVICE
: DMA_TO_DEVICE
;
1572 buf_pa
= dma_map_single(ctrl
->dev
, buf
, len
, dir
);
1573 if (dma_mapping_error(ctrl
->dev
, buf_pa
)) {
1574 dev_err(ctrl
->dev
, "unable to map buffer for DMA\n");
1578 brcmnand_fill_dma_desc(host
, ctrl
->dma_desc
, addr
, buf_pa
, len
,
1579 dma_cmd
, true, true, 0);
1581 brcmnand_dma_run(host
, ctrl
->dma_pa
);
1583 dma_unmap_single(ctrl
->dev
, buf_pa
, len
, dir
);
1585 if (ctrl
->dma_desc
->status_valid
& FLASH_DMA_ECC_ERROR
)
1587 else if (ctrl
->dma_desc
->status_valid
& FLASH_DMA_CORR_ERROR
)
1594 * Assumes proper CS is already set
1596 static int brcmnand_read_by_pio(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1597 u64 addr
, unsigned int trans
, u32
*buf
,
1598 u8
*oob
, u64
*err_addr
)
1600 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1601 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1604 /* Clear error addresses */
1605 brcmnand_write_reg(ctrl
, BRCMNAND_UNCORR_ADDR
, 0);
1606 brcmnand_write_reg(ctrl
, BRCMNAND_CORR_ADDR
, 0);
1607 brcmnand_write_reg(ctrl
, BRCMNAND_UNCORR_EXT_ADDR
, 0);
1608 brcmnand_write_reg(ctrl
, BRCMNAND_CORR_EXT_ADDR
, 0);
1610 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
,
1611 (host
->cs
<< 16) | ((addr
>> 32) & 0xffff));
1612 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
);
1614 for (i
= 0; i
< trans
; i
++, addr
+= FC_BYTES
) {
1615 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_ADDRESS
,
1616 lower_32_bits(addr
));
1617 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
);
1618 /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
1619 brcmnand_send_cmd(host
, CMD_PAGE_READ
);
1620 brcmnand_waitfunc(mtd
, chip
);
1623 brcmnand_soc_data_bus_prepare(ctrl
->soc
, false);
1625 for (j
= 0; j
< FC_WORDS
; j
++, buf
++)
1626 *buf
= brcmnand_read_fc(ctrl
, j
);
1628 brcmnand_soc_data_bus_unprepare(ctrl
->soc
, false);
1632 oob
+= read_oob_from_regs(ctrl
, i
, oob
,
1633 mtd
->oobsize
/ trans
,
1634 host
->hwcfg
.sector_size_1k
);
1637 *err_addr
= brcmnand_read_reg(ctrl
,
1638 BRCMNAND_UNCORR_ADDR
) |
1639 ((u64
)(brcmnand_read_reg(ctrl
,
1640 BRCMNAND_UNCORR_EXT_ADDR
)
1647 *err_addr
= brcmnand_read_reg(ctrl
,
1648 BRCMNAND_CORR_ADDR
) |
1649 ((u64
)(brcmnand_read_reg(ctrl
,
1650 BRCMNAND_CORR_EXT_ADDR
)
1661 * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC
1664 * Because the HW ECC signals an ECC error if an erase paged has even a single
1665 * bitflip, we must check each ECC error to see if it is actually an erased
1666 * page with bitflips, not a truly corrupted page.
1668 * On a real error, return a negative error code (-EBADMSG for ECC error), and
1669 * buf will contain raw data.
1670 * Otherwise, buf gets filled with 0xffs and return the maximum number of
1671 * bitflips-per-ECC-sector to the caller.
1674 static int brcmstb_nand_verify_erased_page(struct mtd_info
*mtd
,
1675 struct nand_chip
*chip
, void *buf
, u64 addr
)
1678 void *oob
= chip
->oob_poi
;
1680 int page
= addr
>> chip
->page_shift
;
1684 buf
= chip
->buffers
->databuf
;
1685 /* Invalidate page cache */
1689 sas
= mtd
->oobsize
/ chip
->ecc
.steps
;
1691 /* read without ecc for verification */
1692 chip
->cmdfunc(mtd
, NAND_CMD_READ0
, 0x00, page
);
1693 ret
= chip
->ecc
.read_page_raw(mtd
, chip
, buf
, true, page
);
1697 for (i
= 0; i
< chip
->ecc
.steps
; i
++, oob
+= sas
) {
1698 ret
= nand_check_erased_ecc_chunk(buf
, chip
->ecc
.size
,
1700 chip
->ecc
.strength
);
1704 bitflips
= max(bitflips
, ret
);
1710 static int brcmnand_read(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1711 u64 addr
, unsigned int trans
, u32
*buf
, u8
*oob
)
1713 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1714 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1719 dev_dbg(ctrl
->dev
, "read %llx -> %p\n", (unsigned long long)addr
, buf
);
1722 brcmnand_write_reg(ctrl
, BRCMNAND_UNCORR_COUNT
, 0);
1724 if (has_flash_dma(ctrl
) && !oob
&& flash_dma_buf_ok(buf
)) {
1725 err
= brcmnand_dma_trans(host
, addr
, buf
, trans
* FC_BYTES
,
1728 if (mtd_is_bitflip_or_eccerr(err
))
1735 memset(oob
, 0x99, mtd
->oobsize
);
1737 err
= brcmnand_read_by_pio(mtd
, chip
, addr
, trans
, buf
,
1741 if (mtd_is_eccerr(err
)) {
1743 * On controller version and 7.0, 7.1 , DMA read after a
1744 * prior PIO read that reported uncorrectable error,
1745 * the DMA engine captures this error following DMA read
1746 * cleared only on subsequent DMA read, so just retry once
1747 * to clear a possible false error reported for current DMA
1750 if ((ctrl
->nand_version
== 0x0700) ||
1751 (ctrl
->nand_version
== 0x0701)) {
1759 * Controller version 7.2 has hw encoder to detect erased page
1760 * bitflips, apply sw verification for older controllers only
1762 if (ctrl
->nand_version
< 0x0702) {
1763 err
= brcmstb_nand_verify_erased_page(mtd
, chip
, buf
,
1765 /* erased page bitflips corrected */
1770 dev_dbg(ctrl
->dev
, "uncorrectable error at 0x%llx\n",
1771 (unsigned long long)err_addr
);
1772 mtd
->ecc_stats
.failed
++;
1773 /* NAND layer expects zero on ECC errors */
1777 if (mtd_is_bitflip(err
)) {
1778 unsigned int corrected
= brcmnand_count_corrected(ctrl
);
1780 dev_dbg(ctrl
->dev
, "corrected error at 0x%llx\n",
1781 (unsigned long long)err_addr
);
1782 mtd
->ecc_stats
.corrected
+= corrected
;
1783 /* Always exceed the software-imposed threshold */
1784 return max(mtd
->bitflip_threshold
, corrected
);
1790 static int brcmnand_read_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1791 uint8_t *buf
, int oob_required
, int page
)
1793 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1794 u8
*oob
= oob_required
? (u8
*)chip
->oob_poi
: NULL
;
1796 return brcmnand_read(mtd
, chip
, host
->last_addr
,
1797 mtd
->writesize
>> FC_SHIFT
, (u32
*)buf
, oob
);
1800 static int brcmnand_read_page_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1801 uint8_t *buf
, int oob_required
, int page
)
1803 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1804 u8
*oob
= oob_required
? (u8
*)chip
->oob_poi
: NULL
;
1807 brcmnand_set_ecc_enabled(host
, 0);
1808 ret
= brcmnand_read(mtd
, chip
, host
->last_addr
,
1809 mtd
->writesize
>> FC_SHIFT
, (u32
*)buf
, oob
);
1810 brcmnand_set_ecc_enabled(host
, 1);
1814 static int brcmnand_read_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1817 return brcmnand_read(mtd
, chip
, (u64
)page
<< chip
->page_shift
,
1818 mtd
->writesize
>> FC_SHIFT
,
1819 NULL
, (u8
*)chip
->oob_poi
);
1822 static int brcmnand_read_oob_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1825 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1827 brcmnand_set_ecc_enabled(host
, 0);
1828 brcmnand_read(mtd
, chip
, (u64
)page
<< chip
->page_shift
,
1829 mtd
->writesize
>> FC_SHIFT
,
1830 NULL
, (u8
*)chip
->oob_poi
);
1831 brcmnand_set_ecc_enabled(host
, 1);
1835 static int brcmnand_write(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1836 u64 addr
, const u32
*buf
, u8
*oob
)
1838 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1839 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1840 unsigned int i
, j
, trans
= mtd
->writesize
>> FC_SHIFT
;
1841 int status
, ret
= 0;
1843 dev_dbg(ctrl
->dev
, "write %llx <- %p\n", (unsigned long long)addr
, buf
);
1845 if (unlikely((unsigned long)buf
& 0x03)) {
1846 dev_warn(ctrl
->dev
, "unaligned buffer: %p\n", buf
);
1847 buf
= (u32
*)((unsigned long)buf
& ~0x03);
1850 brcmnand_wp(mtd
, 0);
1852 for (i
= 0; i
< ctrl
->max_oob
; i
+= 4)
1853 oob_reg_write(ctrl
, i
, 0xffffffff);
1855 if (has_flash_dma(ctrl
) && !oob
&& flash_dma_buf_ok(buf
)) {
1856 if (brcmnand_dma_trans(host
, addr
, (u32
*)buf
,
1857 mtd
->writesize
, CMD_PROGRAM_PAGE
))
1862 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
,
1863 (host
->cs
<< 16) | ((addr
>> 32) & 0xffff));
1864 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
);
1866 for (i
= 0; i
< trans
; i
++, addr
+= FC_BYTES
) {
1867 /* full address MUST be set before populating FC */
1868 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_ADDRESS
,
1869 lower_32_bits(addr
));
1870 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
);
1873 brcmnand_soc_data_bus_prepare(ctrl
->soc
, false);
1875 for (j
= 0; j
< FC_WORDS
; j
++, buf
++)
1876 brcmnand_write_fc(ctrl
, j
, *buf
);
1878 brcmnand_soc_data_bus_unprepare(ctrl
->soc
, false);
1880 for (j
= 0; j
< FC_WORDS
; j
++)
1881 brcmnand_write_fc(ctrl
, j
, 0xffffffff);
1885 oob
+= write_oob_to_regs(ctrl
, i
, oob
,
1886 mtd
->oobsize
/ trans
,
1887 host
->hwcfg
.sector_size_1k
);
1890 /* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
1891 brcmnand_send_cmd(host
, CMD_PROGRAM_PAGE
);
1892 status
= brcmnand_waitfunc(mtd
, chip
);
1894 if (status
& NAND_STATUS_FAIL
) {
1895 dev_info(ctrl
->dev
, "program failed at %llx\n",
1896 (unsigned long long)addr
);
1902 brcmnand_wp(mtd
, 1);
1906 static int brcmnand_write_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1907 const uint8_t *buf
, int oob_required
, int page
)
1909 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1910 void *oob
= oob_required
? chip
->oob_poi
: NULL
;
1912 brcmnand_write(mtd
, chip
, host
->last_addr
, (const u32
*)buf
, oob
);
1916 static int brcmnand_write_page_raw(struct mtd_info
*mtd
,
1917 struct nand_chip
*chip
, const uint8_t *buf
,
1918 int oob_required
, int page
)
1920 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1921 void *oob
= oob_required
? chip
->oob_poi
: NULL
;
1923 brcmnand_set_ecc_enabled(host
, 0);
1924 brcmnand_write(mtd
, chip
, host
->last_addr
, (const u32
*)buf
, oob
);
1925 brcmnand_set_ecc_enabled(host
, 1);
1929 static int brcmnand_write_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1932 return brcmnand_write(mtd
, chip
, (u64
)page
<< chip
->page_shift
,
1933 NULL
, chip
->oob_poi
);
1936 static int brcmnand_write_oob_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1939 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1942 brcmnand_set_ecc_enabled(host
, 0);
1943 ret
= brcmnand_write(mtd
, chip
, (u64
)page
<< chip
->page_shift
, NULL
,
1944 (u8
*)chip
->oob_poi
);
1945 brcmnand_set_ecc_enabled(host
, 1);
1950 /***********************************************************************
1951 * Per-CS setup (1 NAND device)
1952 ***********************************************************************/
1954 static int brcmnand_set_cfg(struct brcmnand_host
*host
,
1955 struct brcmnand_cfg
*cfg
)
1957 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1958 struct nand_chip
*chip
= &host
->chip
;
1959 u16 cfg_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_CFG
);
1960 u16 cfg_ext_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
1961 BRCMNAND_CS_CFG_EXT
);
1962 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
1963 BRCMNAND_CS_ACC_CONTROL
);
1964 u8 block_size
= 0, page_size
= 0, device_size
= 0;
1967 if (ctrl
->block_sizes
) {
1970 for (i
= 0, found
= 0; ctrl
->block_sizes
[i
]; i
++)
1971 if (ctrl
->block_sizes
[i
] * 1024 == cfg
->block_size
) {
1976 dev_warn(ctrl
->dev
, "invalid block size %u\n",
1981 block_size
= ffs(cfg
->block_size
) - ffs(BRCMNAND_MIN_BLOCKSIZE
);
1984 if (cfg
->block_size
< BRCMNAND_MIN_BLOCKSIZE
|| (ctrl
->max_block_size
&&
1985 cfg
->block_size
> ctrl
->max_block_size
)) {
1986 dev_warn(ctrl
->dev
, "invalid block size %u\n",
1991 if (ctrl
->page_sizes
) {
1994 for (i
= 0, found
= 0; ctrl
->page_sizes
[i
]; i
++)
1995 if (ctrl
->page_sizes
[i
] == cfg
->page_size
) {
2000 dev_warn(ctrl
->dev
, "invalid page size %u\n",
2005 page_size
= ffs(cfg
->page_size
) - ffs(BRCMNAND_MIN_PAGESIZE
);
2008 if (cfg
->page_size
< BRCMNAND_MIN_PAGESIZE
|| (ctrl
->max_page_size
&&
2009 cfg
->page_size
> ctrl
->max_page_size
)) {
2010 dev_warn(ctrl
->dev
, "invalid page size %u\n", cfg
->page_size
);
2014 if (fls64(cfg
->device_size
) < fls64(BRCMNAND_MIN_DEVSIZE
)) {
2015 dev_warn(ctrl
->dev
, "invalid device size 0x%llx\n",
2016 (unsigned long long)cfg
->device_size
);
2019 device_size
= fls64(cfg
->device_size
) - fls64(BRCMNAND_MIN_DEVSIZE
);
2021 tmp
= (cfg
->blk_adr_bytes
<< CFG_BLK_ADR_BYTES_SHIFT
) |
2022 (cfg
->col_adr_bytes
<< CFG_COL_ADR_BYTES_SHIFT
) |
2023 (cfg
->ful_adr_bytes
<< CFG_FUL_ADR_BYTES_SHIFT
) |
2024 (!!(cfg
->device_width
== 16) << CFG_BUS_WIDTH_SHIFT
) |
2025 (device_size
<< CFG_DEVICE_SIZE_SHIFT
);
2026 if (cfg_offs
== cfg_ext_offs
) {
2027 tmp
|= (page_size
<< CFG_PAGE_SIZE_SHIFT
) |
2028 (block_size
<< CFG_BLK_SIZE_SHIFT
);
2029 nand_writereg(ctrl
, cfg_offs
, tmp
);
2031 nand_writereg(ctrl
, cfg_offs
, tmp
);
2032 tmp
= (page_size
<< CFG_EXT_PAGE_SIZE_SHIFT
) |
2033 (block_size
<< CFG_EXT_BLK_SIZE_SHIFT
);
2034 nand_writereg(ctrl
, cfg_ext_offs
, tmp
);
2037 tmp
= nand_readreg(ctrl
, acc_control_offs
);
2038 tmp
&= ~brcmnand_ecc_level_mask(ctrl
);
2039 tmp
|= cfg
->ecc_level
<< NAND_ACC_CONTROL_ECC_SHIFT
;
2040 tmp
&= ~brcmnand_spare_area_mask(ctrl
);
2041 tmp
|= cfg
->spare_area_size
;
2042 nand_writereg(ctrl
, acc_control_offs
, tmp
);
2044 brcmnand_set_sector_size_1k(host
, cfg
->sector_size_1k
);
2046 /* threshold = ceil(BCH-level * 0.75) */
2047 brcmnand_wr_corr_thresh(host
, DIV_ROUND_UP(chip
->ecc
.strength
* 3, 4));
2052 static void brcmnand_print_cfg(struct brcmnand_host
*host
,
2053 char *buf
, struct brcmnand_cfg
*cfg
)
2056 "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
2057 (unsigned long long)cfg
->device_size
>> 20,
2058 cfg
->block_size
>> 10,
2059 cfg
->page_size
>= 1024 ? cfg
->page_size
>> 10 : cfg
->page_size
,
2060 cfg
->page_size
>= 1024 ? "KiB" : "B",
2061 cfg
->spare_area_size
, cfg
->device_width
);
2063 /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
2064 if (is_hamming_ecc(host
->ctrl
, cfg
))
2065 sprintf(buf
, ", Hamming ECC");
2066 else if (cfg
->sector_size_1k
)
2067 sprintf(buf
, ", BCH-%u (1KiB sector)", cfg
->ecc_level
<< 1);
2069 sprintf(buf
, ", BCH-%u", cfg
->ecc_level
);
2073 * Minimum number of bytes to address a page. Calculated as:
2074 * roundup(log2(size / page-size) / 8)
2076 * NB: the following does not "round up" for non-power-of-2 'size'; but this is
2077 * OK because many other things will break if 'size' is irregular...
2079 static inline int get_blk_adr_bytes(u64 size
, u32 writesize
)
2081 return ALIGN(ilog2(size
) - ilog2(writesize
), 8) >> 3;
2084 static int brcmnand_setup_dev(struct brcmnand_host
*host
)
2086 struct mtd_info
*mtd
= nand_to_mtd(&host
->chip
);
2087 struct nand_chip
*chip
= &host
->chip
;
2088 struct brcmnand_controller
*ctrl
= host
->ctrl
;
2089 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
2091 u32 offs
, tmp
, oob_sector
;
2094 memset(cfg
, 0, sizeof(*cfg
));
2096 ret
= of_property_read_u32(nand_get_flash_node(chip
),
2097 "brcm,nand-oob-sector-size",
2100 /* Use detected size */
2101 cfg
->spare_area_size
= mtd
->oobsize
/
2102 (mtd
->writesize
>> FC_SHIFT
);
2104 cfg
->spare_area_size
= oob_sector
;
2106 if (cfg
->spare_area_size
> ctrl
->max_oob
)
2107 cfg
->spare_area_size
= ctrl
->max_oob
;
2109 * Set oobsize to be consistent with controller's spare_area_size, as
2110 * the rest is inaccessible.
2112 mtd
->oobsize
= cfg
->spare_area_size
* (mtd
->writesize
>> FC_SHIFT
);
2114 cfg
->device_size
= mtd
->size
;
2115 cfg
->block_size
= mtd
->erasesize
;
2116 cfg
->page_size
= mtd
->writesize
;
2117 cfg
->device_width
= (chip
->options
& NAND_BUSWIDTH_16
) ? 16 : 8;
2118 cfg
->col_adr_bytes
= 2;
2119 cfg
->blk_adr_bytes
= get_blk_adr_bytes(mtd
->size
, mtd
->writesize
);
	if (chip->ecc.mode != NAND_ECC_HW) {
		dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
			chip->ecc.mode);
		return -EINVAL;
	}

	if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
		if (chip->ecc.strength == 1 && chip->ecc.size == 512)
			/* Default to Hamming for 1-bit ECC, if unspecified */
			chip->ecc.algo = NAND_ECC_HAMMING;
		else
			/* Otherwise, BCH */
			chip->ecc.algo = NAND_ECC_BCH;
	}
	if (chip->ecc.algo == NAND_ECC_HAMMING && (chip->ecc.strength != 1 ||
						   chip->ecc.size != 512)) {
		dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
			chip->ecc.strength, chip->ecc.size);
		return -EINVAL;
	}
	switch (chip->ecc.size) {
	case 512:
		if (chip->ecc.algo == NAND_ECC_HAMMING)
			cfg->ecc_level = 15;
		else
			cfg->ecc_level = chip->ecc.strength;
		cfg->sector_size_1k = 0;
		break;
	case 1024:
		if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
			dev_err(ctrl->dev, "1KB sectors not supported\n");
			return -EINVAL;
		}
		if (chip->ecc.strength & 0x1) {
			dev_err(ctrl->dev,
				"odd ECC not supported with 1KB sectors\n");
			return -EINVAL;
		}

		cfg->ecc_level = chip->ecc.strength >> 1;
		cfg->sector_size_1k = 1;
		break;
	default:
		dev_err(ctrl->dev, "unsupported ECC size: %d\n",
			chip->ecc.size);
		return -EINVAL;
	}
	cfg->ful_adr_bytes = cfg->blk_adr_bytes;
	if (mtd->writesize > 512)
		cfg->ful_adr_bytes += cfg->col_adr_bytes;
	else
		cfg->ful_adr_bytes += 1;
	ret = brcmnand_set_cfg(host, cfg);
	if (ret)
		return ret;

	brcmnand_set_ecc_enabled(host, 1);

	brcmnand_print_cfg(host, msg, cfg);
	dev_info(ctrl->dev, "detected %s\n", msg);
	/* Configure ACC_CONTROL */
	offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
	tmp = nand_readreg(ctrl, offs);
	tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
	tmp &= ~ACC_CONTROL_RD_ERASED;

	/* We need to turn on Read from erased pages protected by ECC */
	if (ctrl->nand_version >= 0x0702)
		tmp |= ACC_CONTROL_RD_ERASED;
	tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
	if (ctrl->features & BRCMNAND_HAS_PREFETCH) {
		/*
		 * FIXME: Flash DMA + prefetch may see spurious erased-page
		 * ECC errors
		 */
		if (has_flash_dma(ctrl))
			tmp &= ~ACC_CONTROL_PREFETCH;
		else
			tmp |= ACC_CONTROL_PREFETCH;
	}
	nand_writereg(ctrl, offs, tmp);

	return 0;
}
static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	struct platform_device *pdev = host->pdev;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int ret;
	u16 cfg_offs;

	ret = of_property_read_u32(dn, "reg", &host->cs);
	if (ret) {
		dev_err(&pdev->dev, "can't get chip-select\n");
		return -ENXIO;
	}
	mtd = nand_to_mtd(&host->chip);
	chip = &host->chip;

	nand_set_flash_node(chip, dn);
	nand_set_controller_data(chip, host);
	mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
				   host->cs);
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;

	chip->IO_ADDR_R = (void __iomem *)0xdeadbeef;
	chip->IO_ADDR_W = (void __iomem *)0xdeadbeef;
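	/*
	 * All flash access goes through the controller hooks installed below;
	 * IO_ADDR_R/W are never dereferenced by this driver, so they are
	 * poisoned to make any accidental use stand out.
	 */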
	chip->cmd_ctrl = brcmnand_cmd_ctrl;
	chip->cmdfunc = brcmnand_cmdfunc;
	chip->waitfunc = brcmnand_waitfunc;
	chip->read_byte = brcmnand_read_byte;
	chip->read_buf = brcmnand_read_buf;
	chip->write_buf = brcmnand_write_buf;

	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.read_page = brcmnand_read_page;
	chip->ecc.write_page = brcmnand_write_page;
	chip->ecc.read_page_raw = brcmnand_read_page_raw;
	chip->ecc.write_page_raw = brcmnand_write_page_raw;
	chip->ecc.write_oob_raw = brcmnand_write_oob_raw;
	chip->ecc.read_oob_raw = brcmnand_read_oob_raw;
	chip->ecc.read_oob = brcmnand_read_oob;
	chip->ecc.write_oob = brcmnand_write_oob;

	chip->controller = &ctrl->controller;
	/*
	 * The bootloader might have configured 16bit mode, but the NAND
	 * READID command only works in 8bit mode. We force 8bit mode here
	 * to ensure that NAND READID commands work.
	 */
	cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
	nand_writereg(ctrl, cfg_offs,
		      nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);

	ret = nand_scan_ident(mtd, 1, NULL);
	if (ret)
		return ret;
	chip->options |= NAND_NO_SUBPAGE_WRITE;
	/*
	 * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't
	 * DMA to/from, and have nand_base pass us a bounce buffer instead,
	 * as needed.
	 */
	chip->options |= NAND_USE_BOUNCE_BUFFER;

	if (chip->bbt_options & NAND_BBT_USE_FLASH)
		chip->bbt_options |= NAND_BBT_NO_OOB;
	if (brcmnand_setup_dev(host))
		return -ENXIO;

	chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
	/* only use our internal HW threshold */
	mtd->bitflip_threshold = 1;

	ret = brcmstb_choose_ecc_layout(host);
	if (ret)
		return ret;

	ret = nand_scan_tail(mtd);
	if (ret)
		return ret;

	return mtd_device_register(mtd, NULL, 0);
}
static void brcmnand_save_restore_cs_config(struct brcmnand_host *host,
					    int restore)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
	u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
					      BRCMNAND_CS_CFG_EXT);
	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
						  BRCMNAND_CS_ACC_CONTROL);
	u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
	u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);

	if (restore) {
		nand_writereg(ctrl, cfg_offs, host->hwcfg.config);
		if (cfg_offs != cfg_ext_offs)
			nand_writereg(ctrl, cfg_ext_offs,
				      host->hwcfg.config_ext);
		nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control);
		nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1);
		nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2);
	} else {
		host->hwcfg.config = nand_readreg(ctrl, cfg_offs);
		if (cfg_offs != cfg_ext_offs)
			host->hwcfg.config_ext =
				nand_readreg(ctrl, cfg_ext_offs);
		host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs);
		host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs);
		host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs);
	}
}
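/*
 * The PM callbacks below use the helper above in both directions:
 * brcmnand_suspend() passes restore == 0 to snapshot the per-chip-select
 * registers into host->hwcfg, and brcmnand_resume() passes restore == 1
 * to write them back once the controller may have lost state.
 */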
static int brcmnand_suspend(struct device *dev)
{
	struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
	struct brcmnand_host *host;

	list_for_each_entry(host, &ctrl->host_list, node)
		brcmnand_save_restore_cs_config(host, 0);

	ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT);
	ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR);
	ctrl->corr_stat_threshold =
		brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD);

	if (has_flash_dma(ctrl))
		ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);

	return 0;
}
static int brcmnand_resume(struct device *dev)
{
	struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
	struct brcmnand_host *host;

	if (has_flash_dma(ctrl)) {
		flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode);
		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
	}

	brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
	brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
	brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
			   ctrl->corr_stat_threshold);

	if (ctrl->soc) {
		/* Clear/re-enable interrupt */
		ctrl->soc->ctlrdy_ack(ctrl->soc);
		ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
	}

	list_for_each_entry(host, &ctrl->host_list, node) {
		struct nand_chip *chip = &host->chip;
		struct mtd_info *mtd = nand_to_mtd(chip);

		brcmnand_save_restore_cs_config(host, 1);

		/* Reset the chip, required by some chips after power-up */
		chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
	}

	return 0;
}
const struct dev_pm_ops brcmnand_pm_ops = {
	.suspend	= brcmnand_suspend,
	.resume		= brcmnand_resume,
};
EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
static const struct of_device_id brcmnand_of_match[] = {
	{ .compatible = "brcm,brcmnand-v4.0" },
	{ .compatible = "brcm,brcmnand-v5.0" },
	{ .compatible = "brcm,brcmnand-v6.0" },
	{ .compatible = "brcm,brcmnand-v6.1" },
	{ .compatible = "brcm,brcmnand-v6.2" },
	{ .compatible = "brcm,brcmnand-v7.0" },
	{ .compatible = "brcm,brcmnand-v7.1" },
	{ .compatible = "brcm,brcmnand-v7.2" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, brcmnand_of_match);
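/*
 * Illustrative device-tree fragment (simplified; the addresses, version and
 * chip-select number are made up -- see
 * Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt for the
 * authoritative binding):
 *
 *	nand@f0442800 {
 *		compatible = "brcm,brcmnand-v7.0", "brcm,brcmnand";
 *		reg = <0xf0442800 0x600>;
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *
 *		nandcs@1 {
 *			compatible = "brcm,nandcs";
 *			reg = <1>;	// chip-select 1
 *		};
 *	};
 */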
/***********************************************************************
 * Platform driver setup (per controller)
 ***********************************************************************/
int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = dev->of_node, *child;
	struct brcmnand_controller *ctrl;
	struct resource *res;
	int ret;

	/* We only support device-tree instantiation */
	if (!dn)
		return -ENODEV;

	if (!of_match_node(brcmnand_of_match, dn))
		return -ENODEV;

	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	dev_set_drvdata(dev, ctrl);
	ctrl->dev = dev;

	init_completion(&ctrl->done);
	init_completion(&ctrl->dma_done);
	nand_hw_control_init(&ctrl->controller);
	INIT_LIST_HEAD(&ctrl->host_list);
	/* NAND register range */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctrl->nand_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ctrl->nand_base))
		return PTR_ERR(ctrl->nand_base);
	/* Enable clock before using NAND registers */
	ctrl->clk = devm_clk_get(dev, "nand");
	if (!IS_ERR(ctrl->clk)) {
		ret = clk_prepare_enable(ctrl->clk);
		if (ret)
			return ret;
	} else {
		ret = PTR_ERR(ctrl->clk);
		if (ret == -EPROBE_DEFER)
			return ret;

		ctrl->clk = NULL;
	}
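	/*
	 * The "nand" clock is optional: on a missing clock (other than
	 * -EPROBE_DEFER) ctrl->clk is cleared to NULL above, which the
	 * common clock framework accepts as a no-op in clk_prepare_enable()
	 * and clk_disable_unprepare().
	 */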
	/* Initialize NAND revision */
	ret = brcmnand_revision_init(ctrl);
	if (ret)
		goto err;
	/*
	 * Most chips have this cache at a fixed offset within 'nand' block.
	 * Some must specify this region separately.
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache");
	if (res) {
		ctrl->nand_fc = devm_ioremap_resource(dev, res);
		if (IS_ERR(ctrl->nand_fc)) {
			ret = PTR_ERR(ctrl->nand_fc);
			goto err;
		}
	} else {
		ctrl->nand_fc = ctrl->nand_base +
				ctrl->reg_offsets[BRCMNAND_FC_BASE];
	}
	/* FLASH_DMA */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma");
	if (res) {
		ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(ctrl->flash_dma_base)) {
			ret = PTR_ERR(ctrl->flash_dma_base);
			goto err;
		}

		flash_dma_writel(ctrl, FLASH_DMA_MODE, 1); /* linked-list */
		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);

		/* Allocate descriptor(s) */
		ctrl->dma_desc = dmam_alloc_coherent(dev,
						     sizeof(*ctrl->dma_desc),
						     &ctrl->dma_pa, GFP_KERNEL);
		if (!ctrl->dma_desc) {
			ret = -ENOMEM;
			goto err;
		}

		ctrl->dma_irq = platform_get_irq(pdev, 1);
		if ((int)ctrl->dma_irq < 0) {
			dev_err(dev, "missing FLASH_DMA IRQ\n");
			ret = -ENODEV;
			goto err;
		}

		ret = devm_request_irq(dev, ctrl->dma_irq,
				       brcmnand_dma_irq, 0, DRV_NAME,
				       ctrl);
		if (ret < 0) {
			dev_err(dev, "can't allocate IRQ %d: error %d\n",
				ctrl->dma_irq, ret);
			goto err;
		}

		dev_info(dev, "enabling FLASH_DMA\n");
	}
	/* Disable automatic device ID config, direct addressing */
	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
			 CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0);
	/* Disable XOR addressing */
	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);

	if (ctrl->features & BRCMNAND_HAS_WP) {
		/* Permanently disable write protection */
		if (wp_on == 2)
			brcmnand_set_wp(ctrl, false);
	} else {
		wp_on = 0;
	}
	/* IRQ */
	ctrl->irq = platform_get_irq(pdev, 0);
	if ((int)ctrl->irq < 0) {
		dev_err(dev, "no IRQ defined\n");
		ret = -ENODEV;
		goto err;
	}
	/*
	 * Some SoCs integrate this controller (e.g., its interrupt bits) in
	 * interesting ways
	 */
	if (soc) {
		ctrl->soc = soc;

		ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
				       DRV_NAME, ctrl);

		/* Enable interrupt */
		ctrl->soc->ctlrdy_ack(ctrl->soc);
		ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
	} else {
		/* Use standard interrupt infrastructure */
		ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
				       DRV_NAME, ctrl);
	}
	if (ret < 0) {
		dev_err(dev, "can't allocate IRQ %d: error %d\n",
			ctrl->irq, ret);
		goto err;
	}
	for_each_available_child_of_node(dn, child) {
		if (of_device_is_compatible(child, "brcm,nandcs")) {
			struct brcmnand_host *host;

			host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
			if (!host) {
				/* Drop the reference held by the iterator */
				of_node_put(child);
				ret = -ENOMEM;
				goto err;
			}
			host->pdev = pdev;
			host->ctrl = ctrl;

			ret = brcmnand_init_cs(host, child);
			if (ret) {
				devm_kfree(dev, host);
				continue; /* Try all chip-selects */
			}

			list_add_tail(&host->node, &ctrl->host_list);
		}
	}

	/* No chip-selects could initialize properly */
	if (list_empty(&ctrl->host_list)) {
		ret = -ENODEV;
		goto err;
	}

	return 0;

err:
	clk_disable_unprepare(ctrl->clk);
	return ret;
}
EXPORT_SYMBOL_GPL(brcmnand_probe);
int brcmnand_remove(struct platform_device *pdev)
{
	struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
	struct brcmnand_host *host;

	list_for_each_entry(host, &ctrl->host_list, node)
		nand_release(nand_to_mtd(&host->chip));

	clk_disable_unprepare(ctrl->clk);

	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}
EXPORT_SYMBOL_GPL(brcmnand_remove);
2614 MODULE_LICENSE("GPL v2");
2615 MODULE_AUTHOR("Kevin Cernekee");
2616 MODULE_AUTHOR("Brian Norris");
2617 MODULE_DESCRIPTION("NAND driver for Broadcom chips");
2618 MODULE_ALIAS("platform:brcmnand");