2 * Copyright © 2010-2015 Broadcom Corporation
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
14 #include <linux/clk.h>
15 #include <linux/version.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/platform_device.h>
21 #include <linux/err.h>
22 #include <linux/completion.h>
23 #include <linux/interrupt.h>
24 #include <linux/spinlock.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/ioport.h>
27 #include <linux/bug.h>
28 #include <linux/kernel.h>
29 #include <linux/bitops.h>
31 #include <linux/mtd/mtd.h>
32 #include <linux/mtd/nand.h>
33 #include <linux/mtd/partitions.h>
35 #include <linux/of_mtd.h>
36 #include <linux/of_platform.h>
37 #include <linux/slab.h>
38 #include <linux/list.h>
39 #include <linux/log2.h>
44 * This flag controls whether WP stays on between erase/write commands to mitigate
45 * flash corruption due to power glitches. Values:
46 * 0: NAND_WP is not used or not available
47 * 1: NAND_WP is set by default, cleared for erase/write operations
48 * 2: NAND_WP is always cleared
51 module_param(wp_on
, int, 0444);
53 /***********************************************************************
55 ***********************************************************************/
57 #define DRV_NAME "brcmnand"
/*
 * Native NAND controller command opcodes. The selected opcode is shifted by
 * brcmnand_cmd_shift() and written to the CMD_START register in
 * brcmnand_send_cmd() to kick off an operation.
 */
60 #define CMD_PAGE_READ 0x01
61 #define CMD_SPARE_AREA_READ 0x02
62 #define CMD_STATUS_READ 0x03
63 #define CMD_PROGRAM_PAGE 0x04
64 #define CMD_PROGRAM_SPARE_AREA 0x05
65 #define CMD_COPY_BACK 0x06
66 #define CMD_DEVICE_ID_READ 0x07
67 #define CMD_BLOCK_ERASE 0x08
68 #define CMD_FLASH_RESET 0x09
69 #define CMD_BLOCKS_LOCK 0x0a
70 #define CMD_BLOCKS_LOCK_DOWN 0x0b
71 #define CMD_BLOCKS_UNLOCK 0x0c
72 #define CMD_READ_BLOCKS_LOCK_STATUS 0x0d
73 #define CMD_PARAMETER_READ 0x0e
74 #define CMD_PARAMETER_CHANGE_COL 0x0f
75 #define CMD_LOW_LEVEL_OP 0x10
77 struct brcm_nand_dma_desc
{
92 /* Bitfields for brcm_nand_dma_desc::status_valid */
93 #define FLASH_DMA_ECC_ERROR (1 << 8)
94 #define FLASH_DMA_CORR_ERROR (1 << 9)
96 /* 512B flash cache in the NAND controller HW */
/* Flash-cache size expressed in 32-bit words (FC_BYTES / 4) */
99 #define FC_WORDS (FC_BYTES >> 2)
/* Minimum flash geometry the driver will accept when probing a chip */
101 #define BRCMNAND_MIN_PAGESIZE 512
102 #define BRCMNAND_MIN_BLOCKSIZE (8 * 1024)
103 #define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
105 /* Controller feature flags */
107 BRCMNAND_HAS_1K_SECTORS
= BIT(0),
108 BRCMNAND_HAS_PREFETCH
= BIT(1),
109 BRCMNAND_HAS_CACHE_MODE
= BIT(2),
110 BRCMNAND_HAS_WP
= BIT(3),
113 struct brcmnand_controller
{
115 struct nand_hw_control controller
;
116 void __iomem
*nand_base
;
117 void __iomem
*nand_fc
; /* flash cache */
118 void __iomem
*flash_dma_base
;
120 unsigned int dma_irq
;
123 /* Some SoCs provide custom interrupt status register(s) */
124 struct brcmnand_soc
*soc
;
126 /* Some SoCs have a gateable clock for the controller */
131 struct completion done
;
132 struct completion dma_done
;
134 /* List of NAND hosts (one for each chip-select) */
135 struct list_head host_list
;
137 struct brcm_nand_dma_desc
*dma_desc
;
140 /* in-memory cache of the FLASH_CACHE, used only for some commands */
141 u8 flash_cache
[FC_BYTES
];
143 /* Controller revision details */
144 const u16
*reg_offsets
;
145 unsigned int reg_spacing
; /* between CS1, CS2, ... regs */
146 const u8
*cs_offsets
; /* within each chip-select */
147 const u8
*cs0_offsets
; /* within CS0, if different */
148 unsigned int max_block_size
;
149 const unsigned int *block_sizes
;
150 unsigned int max_page_size
;
151 const unsigned int *page_sizes
;
152 unsigned int max_oob
;
155 /* for low-power standby/resume only */
156 u32 nand_cs_nand_select
;
157 u32 nand_cs_nand_xor
;
158 u32 corr_stat_threshold
;
162 struct brcmnand_cfg
{
164 unsigned int block_size
;
165 unsigned int page_size
;
166 unsigned int spare_area_size
;
167 unsigned int device_width
;
168 unsigned int col_adr_bytes
;
169 unsigned int blk_adr_bytes
;
170 unsigned int ful_adr_bytes
;
171 unsigned int sector_size_1k
;
172 unsigned int ecc_level
;
173 /* use for low-power standby/resume only */
181 struct brcmnand_host
{
182 struct list_head node
;
184 struct nand_chip chip
;
185 struct platform_device
*pdev
;
188 unsigned int last_cmd
;
189 unsigned int last_byte
;
191 struct brcmnand_cfg hwcfg
;
192 struct brcmnand_controller
*ctrl
;
196 BRCMNAND_CMD_START
= 0,
197 BRCMNAND_CMD_EXT_ADDRESS
,
198 BRCMNAND_CMD_ADDRESS
,
199 BRCMNAND_INTFC_STATUS
,
204 BRCMNAND_CS1_BASE
, /* CS1 regs, if non-contiguous */
205 BRCMNAND_CORR_THRESHOLD
,
206 BRCMNAND_CORR_THRESHOLD_EXT
,
207 BRCMNAND_UNCORR_COUNT
,
209 BRCMNAND_CORR_EXT_ADDR
,
211 BRCMNAND_UNCORR_EXT_ADDR
,
212 BRCMNAND_UNCORR_ADDR
,
217 BRCMNAND_OOB_READ_BASE
,
218 BRCMNAND_OOB_READ_10_BASE
, /* offset 0x10, if non-contiguous */
219 BRCMNAND_OOB_WRITE_BASE
,
220 BRCMNAND_OOB_WRITE_10_BASE
, /* offset 0x10, if non-contiguous */
225 static const u16 brcmnand_regs_v40
[] = {
226 [BRCMNAND_CMD_START
] = 0x04,
227 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
228 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
229 [BRCMNAND_INTFC_STATUS
] = 0x6c,
230 [BRCMNAND_CS_SELECT
] = 0x14,
231 [BRCMNAND_CS_XOR
] = 0x18,
232 [BRCMNAND_LL_OP
] = 0x178,
233 [BRCMNAND_CS0_BASE
] = 0x40,
234 [BRCMNAND_CS1_BASE
] = 0xd0,
235 [BRCMNAND_CORR_THRESHOLD
] = 0x84,
236 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0,
237 [BRCMNAND_UNCORR_COUNT
] = 0,
238 [BRCMNAND_CORR_COUNT
] = 0,
239 [BRCMNAND_CORR_EXT_ADDR
] = 0x70,
240 [BRCMNAND_CORR_ADDR
] = 0x74,
241 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x78,
242 [BRCMNAND_UNCORR_ADDR
] = 0x7c,
243 [BRCMNAND_SEMAPHORE
] = 0x58,
244 [BRCMNAND_ID
] = 0x60,
245 [BRCMNAND_ID_EXT
] = 0x64,
246 [BRCMNAND_LL_RDATA
] = 0x17c,
247 [BRCMNAND_OOB_READ_BASE
] = 0x20,
248 [BRCMNAND_OOB_READ_10_BASE
] = 0x130,
249 [BRCMNAND_OOB_WRITE_BASE
] = 0x30,
250 [BRCMNAND_OOB_WRITE_10_BASE
] = 0,
251 [BRCMNAND_FC_BASE
] = 0x200,
255 static const u16 brcmnand_regs_v50
[] = {
256 [BRCMNAND_CMD_START
] = 0x04,
257 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
258 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
259 [BRCMNAND_INTFC_STATUS
] = 0x6c,
260 [BRCMNAND_CS_SELECT
] = 0x14,
261 [BRCMNAND_CS_XOR
] = 0x18,
262 [BRCMNAND_LL_OP
] = 0x178,
263 [BRCMNAND_CS0_BASE
] = 0x40,
264 [BRCMNAND_CS1_BASE
] = 0xd0,
265 [BRCMNAND_CORR_THRESHOLD
] = 0x84,
266 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0,
267 [BRCMNAND_UNCORR_COUNT
] = 0,
268 [BRCMNAND_CORR_COUNT
] = 0,
269 [BRCMNAND_CORR_EXT_ADDR
] = 0x70,
270 [BRCMNAND_CORR_ADDR
] = 0x74,
271 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x78,
272 [BRCMNAND_UNCORR_ADDR
] = 0x7c,
273 [BRCMNAND_SEMAPHORE
] = 0x58,
274 [BRCMNAND_ID
] = 0x60,
275 [BRCMNAND_ID_EXT
] = 0x64,
276 [BRCMNAND_LL_RDATA
] = 0x17c,
277 [BRCMNAND_OOB_READ_BASE
] = 0x20,
278 [BRCMNAND_OOB_READ_10_BASE
] = 0x130,
279 [BRCMNAND_OOB_WRITE_BASE
] = 0x30,
280 [BRCMNAND_OOB_WRITE_10_BASE
] = 0x140,
281 [BRCMNAND_FC_BASE
] = 0x200,
284 /* BRCMNAND v6.0 - v7.1 */
285 static const u16 brcmnand_regs_v60
[] = {
286 [BRCMNAND_CMD_START
] = 0x04,
287 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
288 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
289 [BRCMNAND_INTFC_STATUS
] = 0x14,
290 [BRCMNAND_CS_SELECT
] = 0x18,
291 [BRCMNAND_CS_XOR
] = 0x1c,
292 [BRCMNAND_LL_OP
] = 0x20,
293 [BRCMNAND_CS0_BASE
] = 0x50,
294 [BRCMNAND_CS1_BASE
] = 0,
295 [BRCMNAND_CORR_THRESHOLD
] = 0xc0,
296 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0xc4,
297 [BRCMNAND_UNCORR_COUNT
] = 0xfc,
298 [BRCMNAND_CORR_COUNT
] = 0x100,
299 [BRCMNAND_CORR_EXT_ADDR
] = 0x10c,
300 [BRCMNAND_CORR_ADDR
] = 0x110,
301 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x114,
302 [BRCMNAND_UNCORR_ADDR
] = 0x118,
303 [BRCMNAND_SEMAPHORE
] = 0x150,
304 [BRCMNAND_ID
] = 0x194,
305 [BRCMNAND_ID_EXT
] = 0x198,
306 [BRCMNAND_LL_RDATA
] = 0x19c,
307 [BRCMNAND_OOB_READ_BASE
] = 0x200,
308 [BRCMNAND_OOB_READ_10_BASE
] = 0,
309 [BRCMNAND_OOB_WRITE_BASE
] = 0x280,
310 [BRCMNAND_OOB_WRITE_10_BASE
] = 0,
311 [BRCMNAND_FC_BASE
] = 0x400,
315 static const u16 brcmnand_regs_v71
[] = {
316 [BRCMNAND_CMD_START
] = 0x04,
317 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
318 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
319 [BRCMNAND_INTFC_STATUS
] = 0x14,
320 [BRCMNAND_CS_SELECT
] = 0x18,
321 [BRCMNAND_CS_XOR
] = 0x1c,
322 [BRCMNAND_LL_OP
] = 0x20,
323 [BRCMNAND_CS0_BASE
] = 0x50,
324 [BRCMNAND_CS1_BASE
] = 0,
325 [BRCMNAND_CORR_THRESHOLD
] = 0xdc,
326 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0xe0,
327 [BRCMNAND_UNCORR_COUNT
] = 0xfc,
328 [BRCMNAND_CORR_COUNT
] = 0x100,
329 [BRCMNAND_CORR_EXT_ADDR
] = 0x10c,
330 [BRCMNAND_CORR_ADDR
] = 0x110,
331 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x114,
332 [BRCMNAND_UNCORR_ADDR
] = 0x118,
333 [BRCMNAND_SEMAPHORE
] = 0x150,
334 [BRCMNAND_ID
] = 0x194,
335 [BRCMNAND_ID_EXT
] = 0x198,
336 [BRCMNAND_LL_RDATA
] = 0x19c,
337 [BRCMNAND_OOB_READ_BASE
] = 0x200,
338 [BRCMNAND_OOB_READ_10_BASE
] = 0,
339 [BRCMNAND_OOB_WRITE_BASE
] = 0x280,
340 [BRCMNAND_OOB_WRITE_10_BASE
] = 0,
341 [BRCMNAND_FC_BASE
] = 0x400,
344 enum brcmnand_cs_reg
{
345 BRCMNAND_CS_CFG_EXT
= 0,
347 BRCMNAND_CS_ACC_CONTROL
,
352 /* Per chip-select offsets for v7.1 */
353 static const u8 brcmnand_cs_offsets_v71
[] = {
354 [BRCMNAND_CS_ACC_CONTROL
] = 0x00,
355 [BRCMNAND_CS_CFG_EXT
] = 0x04,
356 [BRCMNAND_CS_CFG
] = 0x08,
357 [BRCMNAND_CS_TIMING1
] = 0x0c,
358 [BRCMNAND_CS_TIMING2
] = 0x10,
361 /* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
362 static const u8 brcmnand_cs_offsets
[] = {
363 [BRCMNAND_CS_ACC_CONTROL
] = 0x00,
364 [BRCMNAND_CS_CFG_EXT
] = 0x04,
365 [BRCMNAND_CS_CFG
] = 0x04,
366 [BRCMNAND_CS_TIMING1
] = 0x08,
367 [BRCMNAND_CS_TIMING2
] = 0x0c,
370 /* Per chip-select offset for <= v5.0 on CS0 only */
371 static const u8 brcmnand_cs_offsets_cs0
[] = {
372 [BRCMNAND_CS_ACC_CONTROL
] = 0x00,
373 [BRCMNAND_CS_CFG_EXT
] = 0x08,
374 [BRCMNAND_CS_CFG
] = 0x08,
375 [BRCMNAND_CS_TIMING1
] = 0x10,
376 [BRCMNAND_CS_TIMING2
] = 0x14,
380 * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
381 * one config register, but once the bitfields overflowed, newer controllers
382 * (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around.
385 CFG_BLK_ADR_BYTES_SHIFT
= 8,
386 CFG_COL_ADR_BYTES_SHIFT
= 12,
387 CFG_FUL_ADR_BYTES_SHIFT
= 16,
388 CFG_BUS_WIDTH_SHIFT
= 23,
389 CFG_BUS_WIDTH
= BIT(CFG_BUS_WIDTH_SHIFT
),
390 CFG_DEVICE_SIZE_SHIFT
= 24,
392 /* Only for pre-v7.1 (with no CFG_EXT register) */
393 CFG_PAGE_SIZE_SHIFT
= 20,
394 CFG_BLK_SIZE_SHIFT
= 28,
396 /* Only for v7.1+ (with CFG_EXT register) */
397 CFG_EXT_PAGE_SIZE_SHIFT
= 0,
398 CFG_EXT_BLK_SIZE_SHIFT
= 4,
401 /* BRCMNAND_INTFC_STATUS */
403 INTFC_FLASH_STATUS
= GENMASK(7, 0),
405 INTFC_ERASED
= BIT(27),
406 INTFC_OOB_VALID
= BIT(28),
407 INTFC_CACHE_VALID
= BIT(29),
408 INTFC_FLASH_READY
= BIT(30),
409 INTFC_CTLR_READY
= BIT(31),
412 static inline u32
nand_readreg(struct brcmnand_controller
*ctrl
, u32 offs
)
414 return brcmnand_readl(ctrl
->nand_base
+ offs
);
417 static inline void nand_writereg(struct brcmnand_controller
*ctrl
, u32 offs
,
420 brcmnand_writel(val
, ctrl
->nand_base
+ offs
);
423 static int brcmnand_revision_init(struct brcmnand_controller
*ctrl
)
425 static const unsigned int block_sizes_v6
[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
426 static const unsigned int block_sizes_v4
[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
427 static const unsigned int page_sizes
[] = { 512, 2048, 4096, 8192, 0 };
429 ctrl
->nand_version
= nand_readreg(ctrl
, 0) & 0xffff;
431 /* Only support v4.0+? */
432 if (ctrl
->nand_version
< 0x0400) {
433 dev_err(ctrl
->dev
, "version %#x not supported\n",
438 /* Register offsets */
439 if (ctrl
->nand_version
>= 0x0701)
440 ctrl
->reg_offsets
= brcmnand_regs_v71
;
441 else if (ctrl
->nand_version
>= 0x0600)
442 ctrl
->reg_offsets
= brcmnand_regs_v60
;
443 else if (ctrl
->nand_version
>= 0x0500)
444 ctrl
->reg_offsets
= brcmnand_regs_v50
;
445 else if (ctrl
->nand_version
>= 0x0400)
446 ctrl
->reg_offsets
= brcmnand_regs_v40
;
448 /* Chip-select stride */
449 if (ctrl
->nand_version
>= 0x0701)
450 ctrl
->reg_spacing
= 0x14;
452 ctrl
->reg_spacing
= 0x10;
454 /* Per chip-select registers */
455 if (ctrl
->nand_version
>= 0x0701) {
456 ctrl
->cs_offsets
= brcmnand_cs_offsets_v71
;
458 ctrl
->cs_offsets
= brcmnand_cs_offsets
;
460 /* v5.0 and earlier has a different CS0 offset layout */
461 if (ctrl
->nand_version
<= 0x0500)
462 ctrl
->cs0_offsets
= brcmnand_cs_offsets_cs0
;
465 /* Page / block sizes */
466 if (ctrl
->nand_version
>= 0x0701) {
467 /* >= v7.1 use nice power-of-2 values! */
468 ctrl
->max_page_size
= 16 * 1024;
469 ctrl
->max_block_size
= 2 * 1024 * 1024;
471 ctrl
->page_sizes
= page_sizes
;
472 if (ctrl
->nand_version
>= 0x0600)
473 ctrl
->block_sizes
= block_sizes_v6
;
475 ctrl
->block_sizes
= block_sizes_v4
;
477 if (ctrl
->nand_version
< 0x0400) {
478 ctrl
->max_page_size
= 4096;
479 ctrl
->max_block_size
= 512 * 1024;
483 /* Maximum spare area sector size (per 512B) */
484 if (ctrl
->nand_version
>= 0x0600)
486 else if (ctrl
->nand_version
>= 0x0500)
491 /* v6.0 and newer (except v6.1) have prefetch support */
492 if (ctrl
->nand_version
>= 0x0600 && ctrl
->nand_version
!= 0x0601)
493 ctrl
->features
|= BRCMNAND_HAS_PREFETCH
;
496 * v6.x has cache mode, but it's implemented differently. Ignore it for
499 if (ctrl
->nand_version
>= 0x0700)
500 ctrl
->features
|= BRCMNAND_HAS_CACHE_MODE
;
502 if (ctrl
->nand_version
>= 0x0500)
503 ctrl
->features
|= BRCMNAND_HAS_1K_SECTORS
;
505 if (ctrl
->nand_version
>= 0x0700)
506 ctrl
->features
|= BRCMNAND_HAS_WP
;
507 else if (of_property_read_bool(ctrl
->dev
->of_node
, "brcm,nand-has-wp"))
508 ctrl
->features
|= BRCMNAND_HAS_WP
;
513 static inline u32
brcmnand_read_reg(struct brcmnand_controller
*ctrl
,
514 enum brcmnand_reg reg
)
516 u16 offs
= ctrl
->reg_offsets
[reg
];
519 return nand_readreg(ctrl
, offs
);
524 static inline void brcmnand_write_reg(struct brcmnand_controller
*ctrl
,
525 enum brcmnand_reg reg
, u32 val
)
527 u16 offs
= ctrl
->reg_offsets
[reg
];
530 nand_writereg(ctrl
, offs
, val
);
533 static inline void brcmnand_rmw_reg(struct brcmnand_controller
*ctrl
,
534 enum brcmnand_reg reg
, u32 mask
, unsigned
537 u32 tmp
= brcmnand_read_reg(ctrl
, reg
);
541 brcmnand_write_reg(ctrl
, reg
, tmp
);
544 static inline u32
brcmnand_read_fc(struct brcmnand_controller
*ctrl
, int word
)
546 return __raw_readl(ctrl
->nand_fc
+ word
* 4);
549 static inline void brcmnand_write_fc(struct brcmnand_controller
*ctrl
,
552 __raw_writel(val
, ctrl
->nand_fc
+ word
* 4);
555 static inline u16
brcmnand_cs_offset(struct brcmnand_controller
*ctrl
, int cs
,
556 enum brcmnand_cs_reg reg
)
558 u16 offs_cs0
= ctrl
->reg_offsets
[BRCMNAND_CS0_BASE
];
559 u16 offs_cs1
= ctrl
->reg_offsets
[BRCMNAND_CS1_BASE
];
562 if (cs
== 0 && ctrl
->cs0_offsets
)
563 cs_offs
= ctrl
->cs0_offsets
[reg
];
565 cs_offs
= ctrl
->cs_offsets
[reg
];
568 return offs_cs1
+ (cs
- 1) * ctrl
->reg_spacing
+ cs_offs
;
570 return offs_cs0
+ cs
* ctrl
->reg_spacing
+ cs_offs
;
573 static inline u32
brcmnand_count_corrected(struct brcmnand_controller
*ctrl
)
575 if (ctrl
->nand_version
< 0x0600)
577 return brcmnand_read_reg(ctrl
, BRCMNAND_CORR_COUNT
);
580 static void brcmnand_wr_corr_thresh(struct brcmnand_host
*host
, u8 val
)
582 struct brcmnand_controller
*ctrl
= host
->ctrl
;
583 unsigned int shift
= 0, bits
;
584 enum brcmnand_reg reg
= BRCMNAND_CORR_THRESHOLD
;
587 if (ctrl
->nand_version
>= 0x0600)
589 else if (ctrl
->nand_version
>= 0x0500)
594 if (ctrl
->nand_version
>= 0x0600) {
596 reg
= BRCMNAND_CORR_THRESHOLD_EXT
;
597 shift
= (cs
% 5) * bits
;
599 brcmnand_rmw_reg(ctrl
, reg
, (bits
- 1) << shift
, shift
, val
);
602 static inline int brcmnand_cmd_shift(struct brcmnand_controller
*ctrl
)
604 if (ctrl
->nand_version
< 0x0700)
609 /***********************************************************************
610 * NAND ACC CONTROL bitfield
612 * Some bits have remained constant throughout hardware revisions, while
613 * others have shifted around.
614 ***********************************************************************/
616 /* Constant for all versions (where supported) */
618 /* See BRCMNAND_HAS_CACHE_MODE */
619 ACC_CONTROL_CACHE_MODE
= BIT(22),
621 /* See BRCMNAND_HAS_PREFETCH */
622 ACC_CONTROL_PREFETCH
= BIT(23),
624 ACC_CONTROL_PAGE_HIT
= BIT(24),
625 ACC_CONTROL_WR_PREEMPT
= BIT(25),
626 ACC_CONTROL_PARTIAL_PAGE
= BIT(26),
627 ACC_CONTROL_RD_ERASED
= BIT(27),
628 ACC_CONTROL_FAST_PGM_RDIN
= BIT(28),
629 ACC_CONTROL_WR_ECC
= BIT(30),
630 ACC_CONTROL_RD_ECC
= BIT(31),
633 static inline u32
brcmnand_spare_area_mask(struct brcmnand_controller
*ctrl
)
635 if (ctrl
->nand_version
>= 0x0600)
636 return GENMASK(6, 0);
638 return GENMASK(5, 0);
641 #define NAND_ACC_CONTROL_ECC_SHIFT 16
643 static inline u32
brcmnand_ecc_level_mask(struct brcmnand_controller
*ctrl
)
645 u32 mask
= (ctrl
->nand_version
>= 0x0600) ? 0x1f : 0x0f;
647 return mask
<< NAND_ACC_CONTROL_ECC_SHIFT
;
650 static void brcmnand_set_ecc_enabled(struct brcmnand_host
*host
, int en
)
652 struct brcmnand_controller
*ctrl
= host
->ctrl
;
653 u16 offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_ACC_CONTROL
);
654 u32 acc_control
= nand_readreg(ctrl
, offs
);
655 u32 ecc_flags
= ACC_CONTROL_WR_ECC
| ACC_CONTROL_RD_ECC
;
658 acc_control
|= ecc_flags
; /* enable RD/WR ECC */
659 acc_control
|= host
->hwcfg
.ecc_level
660 << NAND_ACC_CONTROL_ECC_SHIFT
;
662 acc_control
&= ~ecc_flags
; /* disable RD/WR ECC */
663 acc_control
&= ~brcmnand_ecc_level_mask(ctrl
);
666 nand_writereg(ctrl
, offs
, acc_control
);
669 static inline int brcmnand_sector_1k_shift(struct brcmnand_controller
*ctrl
)
671 if (ctrl
->nand_version
>= 0x0600)
673 else if (ctrl
->nand_version
>= 0x0500)
679 static int brcmnand_get_sector_size_1k(struct brcmnand_host
*host
)
681 struct brcmnand_controller
*ctrl
= host
->ctrl
;
682 int shift
= brcmnand_sector_1k_shift(ctrl
);
683 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
684 BRCMNAND_CS_ACC_CONTROL
);
689 return (nand_readreg(ctrl
, acc_control_offs
) >> shift
) & 0x1;
692 static void brcmnand_set_sector_size_1k(struct brcmnand_host
*host
, int val
)
694 struct brcmnand_controller
*ctrl
= host
->ctrl
;
695 int shift
= brcmnand_sector_1k_shift(ctrl
);
696 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
697 BRCMNAND_CS_ACC_CONTROL
);
703 tmp
= nand_readreg(ctrl
, acc_control_offs
);
704 tmp
&= ~(1 << shift
);
705 tmp
|= (!!val
) << shift
;
706 nand_writereg(ctrl
, acc_control_offs
, tmp
);
709 /***********************************************************************
711 ***********************************************************************/
714 CS_SELECT_NAND_WP
= BIT(29),
715 CS_SELECT_AUTO_DEVICE_ID_CFG
= BIT(30),
718 static inline void brcmnand_set_wp(struct brcmnand_controller
*ctrl
, bool en
)
720 u32 val
= en
? CS_SELECT_NAND_WP
: 0;
722 brcmnand_rmw_reg(ctrl
, BRCMNAND_CS_SELECT
, CS_SELECT_NAND_WP
, 0, val
);
725 /***********************************************************************
727 ***********************************************************************/
730 FLASH_DMA_REVISION
= 0x00,
731 FLASH_DMA_FIRST_DESC
= 0x04,
732 FLASH_DMA_FIRST_DESC_EXT
= 0x08,
733 FLASH_DMA_CTRL
= 0x0c,
734 FLASH_DMA_MODE
= 0x10,
735 FLASH_DMA_STATUS
= 0x14,
736 FLASH_DMA_INTERRUPT_DESC
= 0x18,
737 FLASH_DMA_INTERRUPT_DESC_EXT
= 0x1c,
738 FLASH_DMA_ERROR_STATUS
= 0x20,
739 FLASH_DMA_CURRENT_DESC
= 0x24,
740 FLASH_DMA_CURRENT_DESC_EXT
= 0x28,
743 static inline bool has_flash_dma(struct brcmnand_controller
*ctrl
)
745 return ctrl
->flash_dma_base
;
748 static inline bool flash_dma_buf_ok(const void *buf
)
750 return buf
&& !is_vmalloc_addr(buf
) &&
751 likely(IS_ALIGNED((uintptr_t)buf
, 4));
754 static inline void flash_dma_writel(struct brcmnand_controller
*ctrl
, u8 offs
,
757 brcmnand_writel(val
, ctrl
->flash_dma_base
+ offs
);
760 static inline u32
flash_dma_readl(struct brcmnand_controller
*ctrl
, u8 offs
)
762 return brcmnand_readl(ctrl
->flash_dma_base
+ offs
);
765 /* Low-level operation types: command, address, write, or read */
766 enum brcmnand_llop_type
{
773 /***********************************************************************
774 * Internal support functions
775 ***********************************************************************/
777 static inline bool is_hamming_ecc(struct brcmnand_cfg
*cfg
)
779 return cfg
->sector_size_1k
== 0 && cfg
->spare_area_size
== 16 &&
780 cfg
->ecc_level
== 15;
784 * Returns a nand_ecclayout structure for the given layout/configuration.
785 * Returns NULL on failure.
787 static struct nand_ecclayout
*brcmnand_create_layout(int ecc_level
,
788 struct brcmnand_host
*host
)
790 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
792 struct nand_ecclayout
*layout
;
798 layout
= devm_kzalloc(&host
->pdev
->dev
, sizeof(*layout
), GFP_KERNEL
);
802 sectors
= cfg
->page_size
/ (512 << cfg
->sector_size_1k
);
803 sas
= cfg
->spare_area_size
<< cfg
->sector_size_1k
;
806 if (is_hamming_ecc(cfg
)) {
807 for (i
= 0, idx1
= 0, idx2
= 0; i
< sectors
; i
++) {
808 /* First sector of each page may have BBI */
810 layout
->oobfree
[idx2
].offset
= i
* sas
+ 1;
811 /* Small-page NAND use byte 6 for BBI */
812 if (cfg
->page_size
== 512)
813 layout
->oobfree
[idx2
].offset
--;
814 layout
->oobfree
[idx2
].length
= 5;
816 layout
->oobfree
[idx2
].offset
= i
* sas
;
817 layout
->oobfree
[idx2
].length
= 6;
820 layout
->eccpos
[idx1
++] = i
* sas
+ 6;
821 layout
->eccpos
[idx1
++] = i
* sas
+ 7;
822 layout
->eccpos
[idx1
++] = i
* sas
+ 8;
823 layout
->oobfree
[idx2
].offset
= i
* sas
+ 9;
824 layout
->oobfree
[idx2
].length
= 7;
826 /* Leave zero-terminated entry for OOBFREE */
827 if (idx1
>= MTD_MAX_ECCPOS_ENTRIES_LARGE
||
828 idx2
>= MTD_MAX_OOBFREE_ENTRIES_LARGE
- 1)
836 * CONTROLLER_VERSION:
837 * < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
838 * >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
839 * But we will just be conservative.
841 req
= DIV_ROUND_UP(ecc_level
* 14, 8);
843 dev_err(&host
->pdev
->dev
,
844 "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
849 layout
->eccbytes
= req
* sectors
;
850 for (i
= 0, idx1
= 0, idx2
= 0; i
< sectors
; i
++) {
851 for (j
= sas
- req
; j
< sas
&& idx1
<
852 MTD_MAX_ECCPOS_ENTRIES_LARGE
; j
++, idx1
++)
853 layout
->eccpos
[idx1
] = i
* sas
+ j
;
855 /* First sector of each page may have BBI */
857 if (cfg
->page_size
== 512 && (sas
- req
>= 6)) {
858 /* Small-page NAND use byte 6 for BBI */
859 layout
->oobfree
[idx2
].offset
= 0;
860 layout
->oobfree
[idx2
].length
= 5;
863 layout
->oobfree
[idx2
].offset
= 6;
864 layout
->oobfree
[idx2
].length
=
868 } else if (sas
> req
+ 1) {
869 layout
->oobfree
[idx2
].offset
= i
* sas
+ 1;
870 layout
->oobfree
[idx2
].length
= sas
- req
- 1;
873 } else if (sas
> req
) {
874 layout
->oobfree
[idx2
].offset
= i
* sas
;
875 layout
->oobfree
[idx2
].length
= sas
- req
;
878 /* Leave zero-terminated entry for OOBFREE */
879 if (idx1
>= MTD_MAX_ECCPOS_ENTRIES_LARGE
||
880 idx2
>= MTD_MAX_OOBFREE_ENTRIES_LARGE
- 1)
887 static struct nand_ecclayout
*brcmstb_choose_ecc_layout(
888 struct brcmnand_host
*host
)
890 struct nand_ecclayout
*layout
;
891 struct brcmnand_cfg
*p
= &host
->hwcfg
;
892 unsigned int ecc_level
= p
->ecc_level
;
894 if (p
->sector_size_1k
)
897 layout
= brcmnand_create_layout(ecc_level
, host
);
899 dev_err(&host
->pdev
->dev
,
900 "no proper ecc_layout for this NAND cfg\n");
907 static void brcmnand_wp(struct mtd_info
*mtd
, int wp
)
909 struct nand_chip
*chip
= mtd_to_nand(mtd
);
910 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
911 struct brcmnand_controller
*ctrl
= host
->ctrl
;
913 if ((ctrl
->features
& BRCMNAND_HAS_WP
) && wp_on
== 1) {
914 static int old_wp
= -1;
917 dev_dbg(ctrl
->dev
, "WP %s\n", wp
? "on" : "off");
920 brcmnand_set_wp(ctrl
, wp
);
924 /* Helper functions for reading and writing OOB registers */
925 static inline u8
oob_reg_read(struct brcmnand_controller
*ctrl
, u32 offs
)
927 u16 offset0
, offset10
, reg_offs
;
929 offset0
= ctrl
->reg_offsets
[BRCMNAND_OOB_READ_BASE
];
930 offset10
= ctrl
->reg_offsets
[BRCMNAND_OOB_READ_10_BASE
];
932 if (offs
>= ctrl
->max_oob
)
935 if (offs
>= 16 && offset10
)
936 reg_offs
= offset10
+ ((offs
- 0x10) & ~0x03);
938 reg_offs
= offset0
+ (offs
& ~0x03);
940 return nand_readreg(ctrl
, reg_offs
) >> (24 - ((offs
& 0x03) << 3));
943 static inline void oob_reg_write(struct brcmnand_controller
*ctrl
, u32 offs
,
946 u16 offset0
, offset10
, reg_offs
;
948 offset0
= ctrl
->reg_offsets
[BRCMNAND_OOB_WRITE_BASE
];
949 offset10
= ctrl
->reg_offsets
[BRCMNAND_OOB_WRITE_10_BASE
];
951 if (offs
>= ctrl
->max_oob
)
954 if (offs
>= 16 && offset10
)
955 reg_offs
= offset10
+ ((offs
- 0x10) & ~0x03);
957 reg_offs
= offset0
+ (offs
& ~0x03);
959 nand_writereg(ctrl
, reg_offs
, data
);
963 * read_oob_from_regs - read data from OOB registers
964 * @ctrl: NAND controller
965 * @i: sub-page sector index
966 * @oob: buffer to read to
967 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
968 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
970 static int read_oob_from_regs(struct brcmnand_controller
*ctrl
, int i
, u8
*oob
,
971 int sas
, int sector_1k
)
973 int tbytes
= sas
<< sector_1k
;
976 /* Adjust OOB values for 1K sector size */
977 if (sector_1k
&& (i
& 0x01))
978 tbytes
= max(0, tbytes
- (int)ctrl
->max_oob
);
979 tbytes
= min_t(int, tbytes
, ctrl
->max_oob
);
981 for (j
= 0; j
< tbytes
; j
++)
982 oob
[j
] = oob_reg_read(ctrl
, j
);
987 * write_oob_to_regs - write data to OOB registers
988 * @i: sub-page sector index
989 * @oob: buffer to write from
990 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
991 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
993 static int write_oob_to_regs(struct brcmnand_controller
*ctrl
, int i
,
994 const u8
*oob
, int sas
, int sector_1k
)
996 int tbytes
= sas
<< sector_1k
;
999 /* Adjust OOB values for 1K sector size */
1000 if (sector_1k
&& (i
& 0x01))
1001 tbytes
= max(0, tbytes
- (int)ctrl
->max_oob
);
1002 tbytes
= min_t(int, tbytes
, ctrl
->max_oob
);
1004 for (j
= 0; j
< tbytes
; j
+= 4)
1005 oob_reg_write(ctrl
, j
,
1006 (oob
[j
+ 0] << 24) |
1007 (oob
[j
+ 1] << 16) |
1013 static irqreturn_t
brcmnand_ctlrdy_irq(int irq
, void *data
)
1015 struct brcmnand_controller
*ctrl
= data
;
1017 /* Discard all NAND_CTLRDY interrupts during DMA */
1018 if (ctrl
->dma_pending
)
1021 complete(&ctrl
->done
);
1025 /* Handle SoC-specific interrupt hardware */
1026 static irqreturn_t
brcmnand_irq(int irq
, void *data
)
1028 struct brcmnand_controller
*ctrl
= data
;
1030 if (ctrl
->soc
->ctlrdy_ack(ctrl
->soc
))
1031 return brcmnand_ctlrdy_irq(irq
, data
);
1036 static irqreturn_t
brcmnand_dma_irq(int irq
, void *data
)
1038 struct brcmnand_controller
*ctrl
= data
;
1040 complete(&ctrl
->dma_done
);
1045 static void brcmnand_send_cmd(struct brcmnand_host
*host
, int cmd
)
1047 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1050 dev_dbg(ctrl
->dev
, "send native cmd %d addr_lo 0x%x\n", cmd
,
1051 brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
));
1052 BUG_ON(ctrl
->cmd_pending
!= 0);
1053 ctrl
->cmd_pending
= cmd
;
1055 intfc
= brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
);
1056 BUG_ON(!(intfc
& INTFC_CTLR_READY
));
1058 mb(); /* flush previous writes */
1059 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_START
,
1060 cmd
<< brcmnand_cmd_shift(ctrl
));
1063 /***********************************************************************
1064 * NAND MTD API: read/program/erase
1065 ***********************************************************************/
1067 static void brcmnand_cmd_ctrl(struct mtd_info
*mtd
, int dat
,
1070 /* intentionally left blank */
1073 static int brcmnand_waitfunc(struct mtd_info
*mtd
, struct nand_chip
*this)
1075 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1076 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1077 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1078 unsigned long timeo
= msecs_to_jiffies(100);
1080 dev_dbg(ctrl
->dev
, "wait on native cmd %d\n", ctrl
->cmd_pending
);
1081 if (ctrl
->cmd_pending
&&
1082 wait_for_completion_timeout(&ctrl
->done
, timeo
) <= 0) {
1083 u32 cmd
= brcmnand_read_reg(ctrl
, BRCMNAND_CMD_START
)
1084 >> brcmnand_cmd_shift(ctrl
);
1086 dev_err_ratelimited(ctrl
->dev
,
1087 "timeout waiting for command %#02x\n", cmd
);
1088 dev_err_ratelimited(ctrl
->dev
, "intfc status %08x\n",
1089 brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
));
1091 ctrl
->cmd_pending
= 0;
1092 return brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
) &
1101 LLOP_RETURN_IDLE
= BIT(31),
1103 LLOP_DATA_MASK
= GENMASK(15, 0),
1106 static int brcmnand_low_level_op(struct brcmnand_host
*host
,
1107 enum brcmnand_llop_type type
, u32 data
,
1110 struct mtd_info
*mtd
= nand_to_mtd(&host
->chip
);
1111 struct nand_chip
*chip
= &host
->chip
;
1112 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1115 tmp
= data
& LLOP_DATA_MASK
;
1118 tmp
|= LLOP_WE
| LLOP_CLE
;
1122 tmp
|= LLOP_WE
| LLOP_ALE
;
1135 tmp
|= LLOP_RETURN_IDLE
;
1137 dev_dbg(ctrl
->dev
, "ll_op cmd %#x\n", tmp
);
1139 brcmnand_write_reg(ctrl
, BRCMNAND_LL_OP
, tmp
);
1140 (void)brcmnand_read_reg(ctrl
, BRCMNAND_LL_OP
);
1142 brcmnand_send_cmd(host
, CMD_LOW_LEVEL_OP
);
1143 return brcmnand_waitfunc(mtd
, chip
);
1146 static void brcmnand_cmdfunc(struct mtd_info
*mtd
, unsigned command
,
1147 int column
, int page_addr
)
1149 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1150 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1151 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1152 u64 addr
= (u64
)page_addr
<< chip
->page_shift
;
1155 if (command
== NAND_CMD_READID
|| command
== NAND_CMD_PARAM
||
1156 command
== NAND_CMD_RNDOUT
)
1158 /* Avoid propagating a negative, don't-care address */
1159 else if (page_addr
< 0)
1162 dev_dbg(ctrl
->dev
, "cmd 0x%x addr 0x%llx\n", command
,
1163 (unsigned long long)addr
);
1165 host
->last_cmd
= command
;
1166 host
->last_byte
= 0;
1167 host
->last_addr
= addr
;
1170 case NAND_CMD_RESET
:
1171 native_cmd
= CMD_FLASH_RESET
;
1173 case NAND_CMD_STATUS
:
1174 native_cmd
= CMD_STATUS_READ
;
1176 case NAND_CMD_READID
:
1177 native_cmd
= CMD_DEVICE_ID_READ
;
1179 case NAND_CMD_READOOB
:
1180 native_cmd
= CMD_SPARE_AREA_READ
;
1182 case NAND_CMD_ERASE1
:
1183 native_cmd
= CMD_BLOCK_ERASE
;
1184 brcmnand_wp(mtd
, 0);
1186 case NAND_CMD_PARAM
:
1187 native_cmd
= CMD_PARAMETER_READ
;
1189 case NAND_CMD_SET_FEATURES
:
1190 case NAND_CMD_GET_FEATURES
:
1191 brcmnand_low_level_op(host
, LL_OP_CMD
, command
, false);
1192 brcmnand_low_level_op(host
, LL_OP_ADDR
, column
, false);
1194 case NAND_CMD_RNDOUT
:
1195 native_cmd
= CMD_PARAMETER_CHANGE_COL
;
1196 addr
&= ~((u64
)(FC_BYTES
- 1));
1198 * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
1199 * NB: hwcfg.sector_size_1k may not be initialized yet
1201 if (brcmnand_get_sector_size_1k(host
)) {
1202 host
->hwcfg
.sector_size_1k
=
1203 brcmnand_get_sector_size_1k(host
);
1204 brcmnand_set_sector_size_1k(host
, 0);
1212 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
,
1213 (host
->cs
<< 16) | ((addr
>> 32) & 0xffff));
1214 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
);
1215 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_ADDRESS
, lower_32_bits(addr
));
1216 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
);
1218 brcmnand_send_cmd(host
, native_cmd
);
1219 brcmnand_waitfunc(mtd
, chip
);
1221 if (native_cmd
== CMD_PARAMETER_READ
||
1222 native_cmd
== CMD_PARAMETER_CHANGE_COL
) {
1223 /* Copy flash cache word-wise */
1224 u32
*flash_cache
= (u32
*)ctrl
->flash_cache
;
1227 brcmnand_soc_data_bus_prepare(ctrl
->soc
);
1230 * Must cache the FLASH_CACHE now, since changes in
1231 * SECTOR_SIZE_1K may invalidate it
1233 for (i
= 0; i
< FC_WORDS
; i
++)
1235 * Flash cache is big endian for parameter pages, at
1238 flash_cache
[i
] = be32_to_cpu(brcmnand_read_fc(ctrl
, i
));
1240 brcmnand_soc_data_bus_unprepare(ctrl
->soc
);
1242 /* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
1243 if (host
->hwcfg
.sector_size_1k
)
1244 brcmnand_set_sector_size_1k(host
,
1245 host
->hwcfg
.sector_size_1k
);
1248 /* Re-enable protection is necessary only after erase */
1249 if (command
== NAND_CMD_ERASE1
)
1250 brcmnand_wp(mtd
, 1);
1253 static uint8_t brcmnand_read_byte(struct mtd_info
*mtd
)
1255 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1256 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1257 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1261 switch (host
->last_cmd
) {
1262 case NAND_CMD_READID
:
1263 if (host
->last_byte
< 4)
1264 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_ID
) >>
1265 (24 - (host
->last_byte
<< 3));
1266 else if (host
->last_byte
< 8)
1267 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_ID_EXT
) >>
1268 (56 - (host
->last_byte
<< 3));
1271 case NAND_CMD_READOOB
:
1272 ret
= oob_reg_read(ctrl
, host
->last_byte
);
1275 case NAND_CMD_STATUS
:
1276 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
) &
1278 if (wp_on
) /* hide WP status */
1279 ret
|= NAND_STATUS_WP
;
1282 case NAND_CMD_PARAM
:
1283 case NAND_CMD_RNDOUT
:
1284 addr
= host
->last_addr
+ host
->last_byte
;
1285 offs
= addr
& (FC_BYTES
- 1);
1287 /* At FC_BYTES boundary, switch to next column */
1288 if (host
->last_byte
> 0 && offs
== 0)
1289 chip
->cmdfunc(mtd
, NAND_CMD_RNDOUT
, addr
, -1);
1291 ret
= ctrl
->flash_cache
[offs
];
1293 case NAND_CMD_GET_FEATURES
:
1294 if (host
->last_byte
>= ONFI_SUBFEATURE_PARAM_LEN
) {
1297 bool last
= host
->last_byte
==
1298 ONFI_SUBFEATURE_PARAM_LEN
- 1;
1299 brcmnand_low_level_op(host
, LL_OP_RD
, 0, last
);
1300 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_LL_RDATA
) & 0xff;
1304 dev_dbg(ctrl
->dev
, "read byte = 0x%02x\n", ret
);
1310 static void brcmnand_read_buf(struct mtd_info
*mtd
, uint8_t *buf
, int len
)
1314 for (i
= 0; i
< len
; i
++, buf
++)
1315 *buf
= brcmnand_read_byte(mtd
);
1318 static void brcmnand_write_buf(struct mtd_info
*mtd
, const uint8_t *buf
,
1322 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1323 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1325 switch (host
->last_cmd
) {
1326 case NAND_CMD_SET_FEATURES
:
1327 for (i
= 0; i
< len
; i
++)
1328 brcmnand_low_level_op(host
, LL_OP_WR
, buf
[i
],
1338 * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
1339 * following ahead of time:
1340 * - Is this descriptor the beginning or end of a linked list?
1341 * - What is the (DMA) address of the next descriptor in the linked list?
1343 static int brcmnand_fill_dma_desc(struct brcmnand_host
*host
,
1344 struct brcm_nand_dma_desc
*desc
, u64 addr
,
1345 dma_addr_t buf
, u32 len
, u8 dma_cmd
,
1346 bool begin
, bool end
,
1347 dma_addr_t next_desc
)
1349 memset(desc
, 0, sizeof(*desc
));
1350 /* Descriptors are written in native byte order (wordwise) */
1351 desc
->next_desc
= lower_32_bits(next_desc
);
1352 desc
->next_desc_ext
= upper_32_bits(next_desc
);
1353 desc
->cmd_irq
= (dma_cmd
<< 24) |
1354 (end
? (0x03 << 8) : 0) | /* IRQ | STOP */
1355 (!!begin
) | ((!!end
) << 1); /* head, tail */
1356 #ifdef CONFIG_CPU_BIG_ENDIAN
1357 desc
->cmd_irq
|= 0x01 << 12;
1359 desc
->dram_addr
= lower_32_bits(buf
);
1360 desc
->dram_addr_ext
= upper_32_bits(buf
);
1361 desc
->tfr_len
= len
;
1362 desc
->total_len
= len
;
1363 desc
->flash_addr
= lower_32_bits(addr
);
1364 desc
->flash_addr_ext
= upper_32_bits(addr
);
1365 desc
->cs
= host
->cs
;
1366 desc
->status_valid
= 0x01;
1371 * Kick the FLASH_DMA engine, with a given DMA descriptor
1373 static void brcmnand_dma_run(struct brcmnand_host
*host
, dma_addr_t desc
)
1375 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1376 unsigned long timeo
= msecs_to_jiffies(100);
1378 flash_dma_writel(ctrl
, FLASH_DMA_FIRST_DESC
, lower_32_bits(desc
));
1379 (void)flash_dma_readl(ctrl
, FLASH_DMA_FIRST_DESC
);
1380 flash_dma_writel(ctrl
, FLASH_DMA_FIRST_DESC_EXT
, upper_32_bits(desc
));
1381 (void)flash_dma_readl(ctrl
, FLASH_DMA_FIRST_DESC_EXT
);
1383 /* Start FLASH_DMA engine */
1384 ctrl
->dma_pending
= true;
1385 mb(); /* flush previous writes */
1386 flash_dma_writel(ctrl
, FLASH_DMA_CTRL
, 0x03); /* wake | run */
1388 if (wait_for_completion_timeout(&ctrl
->dma_done
, timeo
) <= 0) {
1390 "timeout waiting for DMA; status %#x, error status %#x\n",
1391 flash_dma_readl(ctrl
, FLASH_DMA_STATUS
),
1392 flash_dma_readl(ctrl
, FLASH_DMA_ERROR_STATUS
));
1394 ctrl
->dma_pending
= false;
1395 flash_dma_writel(ctrl
, FLASH_DMA_CTRL
, 0); /* force stop */
1398 static int brcmnand_dma_trans(struct brcmnand_host
*host
, u64 addr
, u32
*buf
,
1399 u32 len
, u8 dma_cmd
)
1401 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1403 int dir
= dma_cmd
== CMD_PAGE_READ
? DMA_FROM_DEVICE
: DMA_TO_DEVICE
;
1405 buf_pa
= dma_map_single(ctrl
->dev
, buf
, len
, dir
);
1406 if (dma_mapping_error(ctrl
->dev
, buf_pa
)) {
1407 dev_err(ctrl
->dev
, "unable to map buffer for DMA\n");
1411 brcmnand_fill_dma_desc(host
, ctrl
->dma_desc
, addr
, buf_pa
, len
,
1412 dma_cmd
, true, true, 0);
1414 brcmnand_dma_run(host
, ctrl
->dma_pa
);
1416 dma_unmap_single(ctrl
->dev
, buf_pa
, len
, dir
);
1418 if (ctrl
->dma_desc
->status_valid
& FLASH_DMA_ECC_ERROR
)
1420 else if (ctrl
->dma_desc
->status_valid
& FLASH_DMA_CORR_ERROR
)
1427 * Assumes proper CS is already set
1429 static int brcmnand_read_by_pio(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1430 u64 addr
, unsigned int trans
, u32
*buf
,
1431 u8
*oob
, u64
*err_addr
)
1433 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1434 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1437 /* Clear error addresses */
1438 brcmnand_write_reg(ctrl
, BRCMNAND_UNCORR_ADDR
, 0);
1439 brcmnand_write_reg(ctrl
, BRCMNAND_CORR_ADDR
, 0);
1440 brcmnand_write_reg(ctrl
, BRCMNAND_UNCORR_EXT_ADDR
, 0);
1441 brcmnand_write_reg(ctrl
, BRCMNAND_CORR_EXT_ADDR
, 0);
1443 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
,
1444 (host
->cs
<< 16) | ((addr
>> 32) & 0xffff));
1445 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
);
1447 for (i
= 0; i
< trans
; i
++, addr
+= FC_BYTES
) {
1448 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_ADDRESS
,
1449 lower_32_bits(addr
));
1450 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
);
1451 /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
1452 brcmnand_send_cmd(host
, CMD_PAGE_READ
);
1453 brcmnand_waitfunc(mtd
, chip
);
1456 brcmnand_soc_data_bus_prepare(ctrl
->soc
);
1458 for (j
= 0; j
< FC_WORDS
; j
++, buf
++)
1459 *buf
= brcmnand_read_fc(ctrl
, j
);
1461 brcmnand_soc_data_bus_unprepare(ctrl
->soc
);
1465 oob
+= read_oob_from_regs(ctrl
, i
, oob
,
1466 mtd
->oobsize
/ trans
,
1467 host
->hwcfg
.sector_size_1k
);
1470 *err_addr
= brcmnand_read_reg(ctrl
,
1471 BRCMNAND_UNCORR_ADDR
) |
1472 ((u64
)(brcmnand_read_reg(ctrl
,
1473 BRCMNAND_UNCORR_EXT_ADDR
)
1480 *err_addr
= brcmnand_read_reg(ctrl
,
1481 BRCMNAND_CORR_ADDR
) |
1482 ((u64
)(brcmnand_read_reg(ctrl
,
1483 BRCMNAND_CORR_EXT_ADDR
)
1493 static int brcmnand_read(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1494 u64 addr
, unsigned int trans
, u32
*buf
, u8
*oob
)
1496 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1497 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1501 dev_dbg(ctrl
->dev
, "read %llx -> %p\n", (unsigned long long)addr
, buf
);
1503 brcmnand_write_reg(ctrl
, BRCMNAND_UNCORR_COUNT
, 0);
1505 if (has_flash_dma(ctrl
) && !oob
&& flash_dma_buf_ok(buf
)) {
1506 err
= brcmnand_dma_trans(host
, addr
, buf
, trans
* FC_BYTES
,
1509 if (mtd_is_bitflip_or_eccerr(err
))
1516 memset(oob
, 0x99, mtd
->oobsize
);
1518 err
= brcmnand_read_by_pio(mtd
, chip
, addr
, trans
, buf
,
1522 if (mtd_is_eccerr(err
)) {
1523 dev_dbg(ctrl
->dev
, "uncorrectable error at 0x%llx\n",
1524 (unsigned long long)err_addr
);
1525 mtd
->ecc_stats
.failed
++;
1526 /* NAND layer expects zero on ECC errors */
1530 if (mtd_is_bitflip(err
)) {
1531 unsigned int corrected
= brcmnand_count_corrected(ctrl
);
1533 dev_dbg(ctrl
->dev
, "corrected error at 0x%llx\n",
1534 (unsigned long long)err_addr
);
1535 mtd
->ecc_stats
.corrected
+= corrected
;
1536 /* Always exceed the software-imposed threshold */
1537 return max(mtd
->bitflip_threshold
, corrected
);
1543 static int brcmnand_read_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1544 uint8_t *buf
, int oob_required
, int page
)
1546 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1547 u8
*oob
= oob_required
? (u8
*)chip
->oob_poi
: NULL
;
1549 return brcmnand_read(mtd
, chip
, host
->last_addr
,
1550 mtd
->writesize
>> FC_SHIFT
, (u32
*)buf
, oob
);
1553 static int brcmnand_read_page_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1554 uint8_t *buf
, int oob_required
, int page
)
1556 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1557 u8
*oob
= oob_required
? (u8
*)chip
->oob_poi
: NULL
;
1560 brcmnand_set_ecc_enabled(host
, 0);
1561 ret
= brcmnand_read(mtd
, chip
, host
->last_addr
,
1562 mtd
->writesize
>> FC_SHIFT
, (u32
*)buf
, oob
);
1563 brcmnand_set_ecc_enabled(host
, 1);
1567 static int brcmnand_read_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1570 return brcmnand_read(mtd
, chip
, (u64
)page
<< chip
->page_shift
,
1571 mtd
->writesize
>> FC_SHIFT
,
1572 NULL
, (u8
*)chip
->oob_poi
);
1575 static int brcmnand_read_oob_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1578 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1580 brcmnand_set_ecc_enabled(host
, 0);
1581 brcmnand_read(mtd
, chip
, (u64
)page
<< chip
->page_shift
,
1582 mtd
->writesize
>> FC_SHIFT
,
1583 NULL
, (u8
*)chip
->oob_poi
);
1584 brcmnand_set_ecc_enabled(host
, 1);
1588 static int brcmnand_write(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1589 u64 addr
, const u32
*buf
, u8
*oob
)
1591 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1592 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1593 unsigned int i
, j
, trans
= mtd
->writesize
>> FC_SHIFT
;
1594 int status
, ret
= 0;
1596 dev_dbg(ctrl
->dev
, "write %llx <- %p\n", (unsigned long long)addr
, buf
);
1598 if (unlikely((unsigned long)buf
& 0x03)) {
1599 dev_warn(ctrl
->dev
, "unaligned buffer: %p\n", buf
);
1600 buf
= (u32
*)((unsigned long)buf
& ~0x03);
1603 brcmnand_wp(mtd
, 0);
1605 for (i
= 0; i
< ctrl
->max_oob
; i
+= 4)
1606 oob_reg_write(ctrl
, i
, 0xffffffff);
1608 if (has_flash_dma(ctrl
) && !oob
&& flash_dma_buf_ok(buf
)) {
1609 if (brcmnand_dma_trans(host
, addr
, (u32
*)buf
,
1610 mtd
->writesize
, CMD_PROGRAM_PAGE
))
1615 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
,
1616 (host
->cs
<< 16) | ((addr
>> 32) & 0xffff));
1617 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
);
1619 for (i
= 0; i
< trans
; i
++, addr
+= FC_BYTES
) {
1620 /* full address MUST be set before populating FC */
1621 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_ADDRESS
,
1622 lower_32_bits(addr
));
1623 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
);
1626 brcmnand_soc_data_bus_prepare(ctrl
->soc
);
1628 for (j
= 0; j
< FC_WORDS
; j
++, buf
++)
1629 brcmnand_write_fc(ctrl
, j
, *buf
);
1631 brcmnand_soc_data_bus_unprepare(ctrl
->soc
);
1633 for (j
= 0; j
< FC_WORDS
; j
++)
1634 brcmnand_write_fc(ctrl
, j
, 0xffffffff);
1638 oob
+= write_oob_to_regs(ctrl
, i
, oob
,
1639 mtd
->oobsize
/ trans
,
1640 host
->hwcfg
.sector_size_1k
);
1643 /* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
1644 brcmnand_send_cmd(host
, CMD_PROGRAM_PAGE
);
1645 status
= brcmnand_waitfunc(mtd
, chip
);
1647 if (status
& NAND_STATUS_FAIL
) {
1648 dev_info(ctrl
->dev
, "program failed at %llx\n",
1649 (unsigned long long)addr
);
1655 brcmnand_wp(mtd
, 1);
1659 static int brcmnand_write_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1660 const uint8_t *buf
, int oob_required
, int page
)
1662 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1663 void *oob
= oob_required
? chip
->oob_poi
: NULL
;
1665 brcmnand_write(mtd
, chip
, host
->last_addr
, (const u32
*)buf
, oob
);
1669 static int brcmnand_write_page_raw(struct mtd_info
*mtd
,
1670 struct nand_chip
*chip
, const uint8_t *buf
,
1671 int oob_required
, int page
)
1673 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1674 void *oob
= oob_required
? chip
->oob_poi
: NULL
;
1676 brcmnand_set_ecc_enabled(host
, 0);
1677 brcmnand_write(mtd
, chip
, host
->last_addr
, (const u32
*)buf
, oob
);
1678 brcmnand_set_ecc_enabled(host
, 1);
1682 static int brcmnand_write_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1685 return brcmnand_write(mtd
, chip
, (u64
)page
<< chip
->page_shift
,
1686 NULL
, chip
->oob_poi
);
1689 static int brcmnand_write_oob_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1692 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1695 brcmnand_set_ecc_enabled(host
, 0);
1696 ret
= brcmnand_write(mtd
, chip
, (u64
)page
<< chip
->page_shift
, NULL
,
1697 (u8
*)chip
->oob_poi
);
1698 brcmnand_set_ecc_enabled(host
, 1);
1703 /***********************************************************************
1704 * Per-CS setup (1 NAND device)
1705 ***********************************************************************/
1707 static int brcmnand_set_cfg(struct brcmnand_host
*host
,
1708 struct brcmnand_cfg
*cfg
)
1710 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1711 struct nand_chip
*chip
= &host
->chip
;
1712 u16 cfg_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_CFG
);
1713 u16 cfg_ext_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
1714 BRCMNAND_CS_CFG_EXT
);
1715 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
1716 BRCMNAND_CS_ACC_CONTROL
);
1717 u8 block_size
= 0, page_size
= 0, device_size
= 0;
1720 if (ctrl
->block_sizes
) {
1723 for (i
= 0, found
= 0; ctrl
->block_sizes
[i
]; i
++)
1724 if (ctrl
->block_sizes
[i
] * 1024 == cfg
->block_size
) {
1729 dev_warn(ctrl
->dev
, "invalid block size %u\n",
1734 block_size
= ffs(cfg
->block_size
) - ffs(BRCMNAND_MIN_BLOCKSIZE
);
1737 if (cfg
->block_size
< BRCMNAND_MIN_BLOCKSIZE
|| (ctrl
->max_block_size
&&
1738 cfg
->block_size
> ctrl
->max_block_size
)) {
1739 dev_warn(ctrl
->dev
, "invalid block size %u\n",
1744 if (ctrl
->page_sizes
) {
1747 for (i
= 0, found
= 0; ctrl
->page_sizes
[i
]; i
++)
1748 if (ctrl
->page_sizes
[i
] == cfg
->page_size
) {
1753 dev_warn(ctrl
->dev
, "invalid page size %u\n",
1758 page_size
= ffs(cfg
->page_size
) - ffs(BRCMNAND_MIN_PAGESIZE
);
1761 if (cfg
->page_size
< BRCMNAND_MIN_PAGESIZE
|| (ctrl
->max_page_size
&&
1762 cfg
->page_size
> ctrl
->max_page_size
)) {
1763 dev_warn(ctrl
->dev
, "invalid page size %u\n", cfg
->page_size
);
1767 if (fls64(cfg
->device_size
) < fls64(BRCMNAND_MIN_DEVSIZE
)) {
1768 dev_warn(ctrl
->dev
, "invalid device size 0x%llx\n",
1769 (unsigned long long)cfg
->device_size
);
1772 device_size
= fls64(cfg
->device_size
) - fls64(BRCMNAND_MIN_DEVSIZE
);
1774 tmp
= (cfg
->blk_adr_bytes
<< CFG_BLK_ADR_BYTES_SHIFT
) |
1775 (cfg
->col_adr_bytes
<< CFG_COL_ADR_BYTES_SHIFT
) |
1776 (cfg
->ful_adr_bytes
<< CFG_FUL_ADR_BYTES_SHIFT
) |
1777 (!!(cfg
->device_width
== 16) << CFG_BUS_WIDTH_SHIFT
) |
1778 (device_size
<< CFG_DEVICE_SIZE_SHIFT
);
1779 if (cfg_offs
== cfg_ext_offs
) {
1780 tmp
|= (page_size
<< CFG_PAGE_SIZE_SHIFT
) |
1781 (block_size
<< CFG_BLK_SIZE_SHIFT
);
1782 nand_writereg(ctrl
, cfg_offs
, tmp
);
1784 nand_writereg(ctrl
, cfg_offs
, tmp
);
1785 tmp
= (page_size
<< CFG_EXT_PAGE_SIZE_SHIFT
) |
1786 (block_size
<< CFG_EXT_BLK_SIZE_SHIFT
);
1787 nand_writereg(ctrl
, cfg_ext_offs
, tmp
);
1790 tmp
= nand_readreg(ctrl
, acc_control_offs
);
1791 tmp
&= ~brcmnand_ecc_level_mask(ctrl
);
1792 tmp
|= cfg
->ecc_level
<< NAND_ACC_CONTROL_ECC_SHIFT
;
1793 tmp
&= ~brcmnand_spare_area_mask(ctrl
);
1794 tmp
|= cfg
->spare_area_size
;
1795 nand_writereg(ctrl
, acc_control_offs
, tmp
);
1797 brcmnand_set_sector_size_1k(host
, cfg
->sector_size_1k
);
1799 /* threshold = ceil(BCH-level * 0.75) */
1800 brcmnand_wr_corr_thresh(host
, DIV_ROUND_UP(chip
->ecc
.strength
* 3, 4));
1805 static void brcmnand_print_cfg(char *buf
, struct brcmnand_cfg
*cfg
)
1808 "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
1809 (unsigned long long)cfg
->device_size
>> 20,
1810 cfg
->block_size
>> 10,
1811 cfg
->page_size
>= 1024 ? cfg
->page_size
>> 10 : cfg
->page_size
,
1812 cfg
->page_size
>= 1024 ? "KiB" : "B",
1813 cfg
->spare_area_size
, cfg
->device_width
);
1815 /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
1816 if (is_hamming_ecc(cfg
))
1817 sprintf(buf
, ", Hamming ECC");
1818 else if (cfg
->sector_size_1k
)
1819 sprintf(buf
, ", BCH-%u (1KiB sector)", cfg
->ecc_level
<< 1);
1821 sprintf(buf
, ", BCH-%u", cfg
->ecc_level
);
1825 * Minimum number of bytes to address a page. Calculated as:
1826 * roundup(log2(size / page-size) / 8)
1828 * NB: the following does not "round up" for non-power-of-2 'size'; but this is
1829 * OK because many other things will break if 'size' is irregular...
1831 static inline int get_blk_adr_bytes(u64 size
, u32 writesize
)
1833 return ALIGN(ilog2(size
) - ilog2(writesize
), 8) >> 3;
1836 static int brcmnand_setup_dev(struct brcmnand_host
*host
)
1838 struct mtd_info
*mtd
= nand_to_mtd(&host
->chip
);
1839 struct nand_chip
*chip
= &host
->chip
;
1840 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1841 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
1843 u32 offs
, tmp
, oob_sector
;
1846 memset(cfg
, 0, sizeof(*cfg
));
1848 ret
= of_property_read_u32(nand_get_flash_node(chip
),
1849 "brcm,nand-oob-sector-size",
1852 /* Use detected size */
1853 cfg
->spare_area_size
= mtd
->oobsize
/
1854 (mtd
->writesize
>> FC_SHIFT
);
1856 cfg
->spare_area_size
= oob_sector
;
1858 if (cfg
->spare_area_size
> ctrl
->max_oob
)
1859 cfg
->spare_area_size
= ctrl
->max_oob
;
1861 * Set oobsize to be consistent with controller's spare_area_size, as
1862 * the rest is inaccessible.
1864 mtd
->oobsize
= cfg
->spare_area_size
* (mtd
->writesize
>> FC_SHIFT
);
1866 cfg
->device_size
= mtd
->size
;
1867 cfg
->block_size
= mtd
->erasesize
;
1868 cfg
->page_size
= mtd
->writesize
;
1869 cfg
->device_width
= (chip
->options
& NAND_BUSWIDTH_16
) ? 16 : 8;
1870 cfg
->col_adr_bytes
= 2;
1871 cfg
->blk_adr_bytes
= get_blk_adr_bytes(mtd
->size
, mtd
->writesize
);
1873 switch (chip
->ecc
.size
) {
1875 if (chip
->ecc
.strength
== 1) /* Hamming */
1876 cfg
->ecc_level
= 15;
1878 cfg
->ecc_level
= chip
->ecc
.strength
;
1879 cfg
->sector_size_1k
= 0;
1882 if (!(ctrl
->features
& BRCMNAND_HAS_1K_SECTORS
)) {
1883 dev_err(ctrl
->dev
, "1KB sectors not supported\n");
1886 if (chip
->ecc
.strength
& 0x1) {
1888 "odd ECC not supported with 1KB sectors\n");
1892 cfg
->ecc_level
= chip
->ecc
.strength
>> 1;
1893 cfg
->sector_size_1k
= 1;
1896 dev_err(ctrl
->dev
, "unsupported ECC size: %d\n",
1901 cfg
->ful_adr_bytes
= cfg
->blk_adr_bytes
;
1902 if (mtd
->writesize
> 512)
1903 cfg
->ful_adr_bytes
+= cfg
->col_adr_bytes
;
1905 cfg
->ful_adr_bytes
+= 1;
1907 ret
= brcmnand_set_cfg(host
, cfg
);
1911 brcmnand_set_ecc_enabled(host
, 1);
1913 brcmnand_print_cfg(msg
, cfg
);
1914 dev_info(ctrl
->dev
, "detected %s\n", msg
);
1916 /* Configure ACC_CONTROL */
1917 offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_ACC_CONTROL
);
1918 tmp
= nand_readreg(ctrl
, offs
);
1919 tmp
&= ~ACC_CONTROL_PARTIAL_PAGE
;
1920 tmp
&= ~ACC_CONTROL_RD_ERASED
;
1921 tmp
&= ~ACC_CONTROL_FAST_PGM_RDIN
;
1922 if (ctrl
->features
& BRCMNAND_HAS_PREFETCH
) {
1924 * FIXME: Flash DMA + prefetch may see spurious erased-page ECC
1927 if (has_flash_dma(ctrl
))
1928 tmp
&= ~ACC_CONTROL_PREFETCH
;
1930 tmp
|= ACC_CONTROL_PREFETCH
;
1932 nand_writereg(ctrl
, offs
, tmp
);
1937 static int brcmnand_init_cs(struct brcmnand_host
*host
, struct device_node
*dn
)
1939 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1940 struct platform_device
*pdev
= host
->pdev
;
1941 struct mtd_info
*mtd
;
1942 struct nand_chip
*chip
;
1946 ret
= of_property_read_u32(dn
, "reg", &host
->cs
);
1948 dev_err(&pdev
->dev
, "can't get chip-select\n");
1952 mtd
= nand_to_mtd(&host
->chip
);
1955 nand_set_flash_node(chip
, dn
);
1956 nand_set_controller_data(chip
, host
);
1957 mtd
->name
= devm_kasprintf(&pdev
->dev
, GFP_KERNEL
, "brcmnand.%d",
1959 mtd
->owner
= THIS_MODULE
;
1960 mtd
->dev
.parent
= &pdev
->dev
;
1962 chip
->IO_ADDR_R
= (void __iomem
*)0xdeadbeef;
1963 chip
->IO_ADDR_W
= (void __iomem
*)0xdeadbeef;
1965 chip
->cmd_ctrl
= brcmnand_cmd_ctrl
;
1966 chip
->cmdfunc
= brcmnand_cmdfunc
;
1967 chip
->waitfunc
= brcmnand_waitfunc
;
1968 chip
->read_byte
= brcmnand_read_byte
;
1969 chip
->read_buf
= brcmnand_read_buf
;
1970 chip
->write_buf
= brcmnand_write_buf
;
1972 chip
->ecc
.mode
= NAND_ECC_HW
;
1973 chip
->ecc
.read_page
= brcmnand_read_page
;
1974 chip
->ecc
.write_page
= brcmnand_write_page
;
1975 chip
->ecc
.read_page_raw
= brcmnand_read_page_raw
;
1976 chip
->ecc
.write_page_raw
= brcmnand_write_page_raw
;
1977 chip
->ecc
.write_oob_raw
= brcmnand_write_oob_raw
;
1978 chip
->ecc
.read_oob_raw
= brcmnand_read_oob_raw
;
1979 chip
->ecc
.read_oob
= brcmnand_read_oob
;
1980 chip
->ecc
.write_oob
= brcmnand_write_oob
;
1982 chip
->controller
= &ctrl
->controller
;
1985 * The bootloader might have configured 16bit mode but
1986 * NAND READID command only works in 8bit mode. We force
1987 * 8bit mode here to ensure that NAND READID commands works.
1989 cfg_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_CFG
);
1990 nand_writereg(ctrl
, cfg_offs
,
1991 nand_readreg(ctrl
, cfg_offs
) & ~CFG_BUS_WIDTH
);
1993 if (nand_scan_ident(mtd
, 1, NULL
))
1996 chip
->options
|= NAND_NO_SUBPAGE_WRITE
;
1998 * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
1999 * to/from, and have nand_base pass us a bounce buffer instead, as
2002 chip
->options
|= NAND_USE_BOUNCE_BUFFER
;
2004 if (of_get_nand_on_flash_bbt(dn
))
2005 chip
->bbt_options
|= NAND_BBT_USE_FLASH
| NAND_BBT_NO_OOB
;
2007 if (brcmnand_setup_dev(host
))
2010 chip
->ecc
.size
= host
->hwcfg
.sector_size_1k
? 1024 : 512;
2011 /* only use our internal HW threshold */
2012 mtd
->bitflip_threshold
= 1;
2014 chip
->ecc
.layout
= brcmstb_choose_ecc_layout(host
);
2015 if (!chip
->ecc
.layout
)
2018 if (nand_scan_tail(mtd
))
2021 return mtd_device_register(mtd
, NULL
, 0);
2024 static void brcmnand_save_restore_cs_config(struct brcmnand_host
*host
,
2027 struct brcmnand_controller
*ctrl
= host
->ctrl
;
2028 u16 cfg_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_CFG
);
2029 u16 cfg_ext_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
2030 BRCMNAND_CS_CFG_EXT
);
2031 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
2032 BRCMNAND_CS_ACC_CONTROL
);
2033 u16 t1_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_TIMING1
);
2034 u16 t2_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_TIMING2
);
2037 nand_writereg(ctrl
, cfg_offs
, host
->hwcfg
.config
);
2038 if (cfg_offs
!= cfg_ext_offs
)
2039 nand_writereg(ctrl
, cfg_ext_offs
,
2040 host
->hwcfg
.config_ext
);
2041 nand_writereg(ctrl
, acc_control_offs
, host
->hwcfg
.acc_control
);
2042 nand_writereg(ctrl
, t1_offs
, host
->hwcfg
.timing_1
);
2043 nand_writereg(ctrl
, t2_offs
, host
->hwcfg
.timing_2
);
2045 host
->hwcfg
.config
= nand_readreg(ctrl
, cfg_offs
);
2046 if (cfg_offs
!= cfg_ext_offs
)
2047 host
->hwcfg
.config_ext
=
2048 nand_readreg(ctrl
, cfg_ext_offs
);
2049 host
->hwcfg
.acc_control
= nand_readreg(ctrl
, acc_control_offs
);
2050 host
->hwcfg
.timing_1
= nand_readreg(ctrl
, t1_offs
);
2051 host
->hwcfg
.timing_2
= nand_readreg(ctrl
, t2_offs
);
2055 static int brcmnand_suspend(struct device
*dev
)
2057 struct brcmnand_controller
*ctrl
= dev_get_drvdata(dev
);
2058 struct brcmnand_host
*host
;
2060 list_for_each_entry(host
, &ctrl
->host_list
, node
)
2061 brcmnand_save_restore_cs_config(host
, 0);
2063 ctrl
->nand_cs_nand_select
= brcmnand_read_reg(ctrl
, BRCMNAND_CS_SELECT
);
2064 ctrl
->nand_cs_nand_xor
= brcmnand_read_reg(ctrl
, BRCMNAND_CS_XOR
);
2065 ctrl
->corr_stat_threshold
=
2066 brcmnand_read_reg(ctrl
, BRCMNAND_CORR_THRESHOLD
);
2068 if (has_flash_dma(ctrl
))
2069 ctrl
->flash_dma_mode
= flash_dma_readl(ctrl
, FLASH_DMA_MODE
);
2074 static int brcmnand_resume(struct device
*dev
)
2076 struct brcmnand_controller
*ctrl
= dev_get_drvdata(dev
);
2077 struct brcmnand_host
*host
;
2079 if (has_flash_dma(ctrl
)) {
2080 flash_dma_writel(ctrl
, FLASH_DMA_MODE
, ctrl
->flash_dma_mode
);
2081 flash_dma_writel(ctrl
, FLASH_DMA_ERROR_STATUS
, 0);
2084 brcmnand_write_reg(ctrl
, BRCMNAND_CS_SELECT
, ctrl
->nand_cs_nand_select
);
2085 brcmnand_write_reg(ctrl
, BRCMNAND_CS_XOR
, ctrl
->nand_cs_nand_xor
);
2086 brcmnand_write_reg(ctrl
, BRCMNAND_CORR_THRESHOLD
,
2087 ctrl
->corr_stat_threshold
);
2089 /* Clear/re-enable interrupt */
2090 ctrl
->soc
->ctlrdy_ack(ctrl
->soc
);
2091 ctrl
->soc
->ctlrdy_set_enabled(ctrl
->soc
, true);
2094 list_for_each_entry(host
, &ctrl
->host_list
, node
) {
2095 struct nand_chip
*chip
= &host
->chip
;
2096 struct mtd_info
*mtd
= nand_to_mtd(chip
);
2098 brcmnand_save_restore_cs_config(host
, 1);
2100 /* Reset the chip, required by some chips after power-up */
2101 chip
->cmdfunc(mtd
, NAND_CMD_RESET
, -1, -1);
2107 const struct dev_pm_ops brcmnand_pm_ops
= {
2108 .suspend
= brcmnand_suspend
,
2109 .resume
= brcmnand_resume
,
2111 EXPORT_SYMBOL_GPL(brcmnand_pm_ops
);
2113 static const struct of_device_id brcmnand_of_match
[] = {
2114 { .compatible
= "brcm,brcmnand-v4.0" },
2115 { .compatible
= "brcm,brcmnand-v5.0" },
2116 { .compatible
= "brcm,brcmnand-v6.0" },
2117 { .compatible
= "brcm,brcmnand-v6.1" },
2118 { .compatible
= "brcm,brcmnand-v7.0" },
2119 { .compatible
= "brcm,brcmnand-v7.1" },
2122 MODULE_DEVICE_TABLE(of
, brcmnand_of_match
);
2124 /***********************************************************************
2125 * Platform driver setup (per controller)
2126 ***********************************************************************/
2128 int brcmnand_probe(struct platform_device
*pdev
, struct brcmnand_soc
*soc
)
2130 struct device
*dev
= &pdev
->dev
;
2131 struct device_node
*dn
= dev
->of_node
, *child
;
2132 struct brcmnand_controller
*ctrl
;
2133 struct resource
*res
;
2136 /* We only support device-tree instantiation */
2140 if (!of_match_node(brcmnand_of_match
, dn
))
2143 ctrl
= devm_kzalloc(dev
, sizeof(*ctrl
), GFP_KERNEL
);
2147 dev_set_drvdata(dev
, ctrl
);
2150 init_completion(&ctrl
->done
);
2151 init_completion(&ctrl
->dma_done
);
2152 spin_lock_init(&ctrl
->controller
.lock
);
2153 init_waitqueue_head(&ctrl
->controller
.wq
);
2154 INIT_LIST_HEAD(&ctrl
->host_list
);
2156 /* NAND register range */
2157 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2158 ctrl
->nand_base
= devm_ioremap_resource(dev
, res
);
2159 if (IS_ERR(ctrl
->nand_base
))
2160 return PTR_ERR(ctrl
->nand_base
);
2162 /* Enable clock before using NAND registers */
2163 ctrl
->clk
= devm_clk_get(dev
, "nand");
2164 if (!IS_ERR(ctrl
->clk
)) {
2165 ret
= clk_prepare_enable(ctrl
->clk
);
2169 ret
= PTR_ERR(ctrl
->clk
);
2170 if (ret
== -EPROBE_DEFER
)
2176 /* Initialize NAND revision */
2177 ret
= brcmnand_revision_init(ctrl
);
2182 * Most chips have this cache at a fixed offset within 'nand' block.
2183 * Some must specify this region separately.
2185 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "nand-cache");
2187 ctrl
->nand_fc
= devm_ioremap_resource(dev
, res
);
2188 if (IS_ERR(ctrl
->nand_fc
)) {
2189 ret
= PTR_ERR(ctrl
->nand_fc
);
2193 ctrl
->nand_fc
= ctrl
->nand_base
+
2194 ctrl
->reg_offsets
[BRCMNAND_FC_BASE
];
2198 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "flash-dma");
2200 ctrl
->flash_dma_base
= devm_ioremap_resource(dev
, res
);
2201 if (IS_ERR(ctrl
->flash_dma_base
)) {
2202 ret
= PTR_ERR(ctrl
->flash_dma_base
);
2206 flash_dma_writel(ctrl
, FLASH_DMA_MODE
, 1); /* linked-list */
2207 flash_dma_writel(ctrl
, FLASH_DMA_ERROR_STATUS
, 0);
2209 /* Allocate descriptor(s) */
2210 ctrl
->dma_desc
= dmam_alloc_coherent(dev
,
2211 sizeof(*ctrl
->dma_desc
),
2212 &ctrl
->dma_pa
, GFP_KERNEL
);
2213 if (!ctrl
->dma_desc
) {
2218 ctrl
->dma_irq
= platform_get_irq(pdev
, 1);
2219 if ((int)ctrl
->dma_irq
< 0) {
2220 dev_err(dev
, "missing FLASH_DMA IRQ\n");
2225 ret
= devm_request_irq(dev
, ctrl
->dma_irq
,
2226 brcmnand_dma_irq
, 0, DRV_NAME
,
2229 dev_err(dev
, "can't allocate IRQ %d: error %d\n",
2230 ctrl
->dma_irq
, ret
);
2234 dev_info(dev
, "enabling FLASH_DMA\n");
2237 /* Disable automatic device ID config, direct addressing */
2238 brcmnand_rmw_reg(ctrl
, BRCMNAND_CS_SELECT
,
2239 CS_SELECT_AUTO_DEVICE_ID_CFG
| 0xff, 0, 0);
2240 /* Disable XOR addressing */
2241 brcmnand_rmw_reg(ctrl
, BRCMNAND_CS_XOR
, 0xff, 0, 0);
2243 if (ctrl
->features
& BRCMNAND_HAS_WP
) {
2244 /* Permanently disable write protection */
2246 brcmnand_set_wp(ctrl
, false);
2252 ctrl
->irq
= platform_get_irq(pdev
, 0);
2253 if ((int)ctrl
->irq
< 0) {
2254 dev_err(dev
, "no IRQ defined\n");
2260 * Some SoCs integrate this controller (e.g., its interrupt bits) in
2266 ret
= devm_request_irq(dev
, ctrl
->irq
, brcmnand_irq
, 0,
2269 /* Enable interrupt */
2270 ctrl
->soc
->ctlrdy_ack(ctrl
->soc
);
2271 ctrl
->soc
->ctlrdy_set_enabled(ctrl
->soc
, true);
2273 /* Use standard interrupt infrastructure */
2274 ret
= devm_request_irq(dev
, ctrl
->irq
, brcmnand_ctlrdy_irq
, 0,
2278 dev_err(dev
, "can't allocate IRQ %d: error %d\n",
2283 for_each_available_child_of_node(dn
, child
) {
2284 if (of_device_is_compatible(child
, "brcm,nandcs")) {
2285 struct brcmnand_host
*host
;
2287 host
= devm_kzalloc(dev
, sizeof(*host
), GFP_KERNEL
);
2296 ret
= brcmnand_init_cs(host
, child
);
2298 devm_kfree(dev
, host
);
2299 continue; /* Try all chip-selects */
2302 list_add_tail(&host
->node
, &ctrl
->host_list
);
2306 /* No chip-selects could initialize properly */
2307 if (list_empty(&ctrl
->host_list
)) {
2315 clk_disable_unprepare(ctrl
->clk
);
2319 EXPORT_SYMBOL_GPL(brcmnand_probe
);
2321 int brcmnand_remove(struct platform_device
*pdev
)
2323 struct brcmnand_controller
*ctrl
= dev_get_drvdata(&pdev
->dev
);
2324 struct brcmnand_host
*host
;
2326 list_for_each_entry(host
, &ctrl
->host_list
, node
)
2327 nand_release(nand_to_mtd(&host
->chip
));
2329 clk_disable_unprepare(ctrl
->clk
);
2331 dev_set_drvdata(&pdev
->dev
, NULL
);
2335 EXPORT_SYMBOL_GPL(brcmnand_remove
);
2337 MODULE_LICENSE("GPL v2");
2338 MODULE_AUTHOR("Kevin Cernekee");
2339 MODULE_AUTHOR("Brian Norris");
2340 MODULE_DESCRIPTION("NAND driver for Broadcom chips");
2341 MODULE_ALIAS("platform:brcmnand");