// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale GPMI NAND Flash Driver
 *
 * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma/mxs-dma.h>
#include "gpmi-nand.h"
#include "gpmi-regs.h"
#include "bch-regs.h"
23 /* Resource names for the GPMI NAND driver. */
24 #define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
25 #define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
26 #define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
28 /* Converts time to clock cycles */
29 #define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
31 #define MXS_SET_ADDR 0x4
32 #define MXS_CLR_ADDR 0x8
34 * Clear the bit and poll it cleared. This is usually called with
35 * a reset address and mask being either SFTRST(bit 31) or CLKGATE
38 static int clear_poll_bit(void __iomem
*addr
, u32 mask
)
43 writel(mask
, addr
+ MXS_CLR_ADDR
);
46 * SFTRST needs 3 GPMI clocks to settle, the reference manual
47 * recommends to wait 1us.
51 /* poll the bit becoming clear */
52 while ((readl(addr
) & mask
) && --timeout
)
58 #define MODULE_CLKGATE (1 << 30)
59 #define MODULE_SFTRST (1 << 31)
61 * The current mxs_reset_block() will do two things:
62 * [1] enable the module.
63 * [2] reset the module.
65 * In most of the cases, it's ok.
66 * But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
67 * If you try to soft reset the BCH block, it becomes unusable until
68 * the next hard reset. This case occurs in the NAND boot mode. When the board
69 * boots by NAND, the ROM of the chip will initialize the BCH blocks itself.
70 * So If the driver tries to reset the BCH again, the BCH will not work anymore.
71 * You will see a DMA timeout in this case. The bug has been fixed
72 * in the following chips, such as MX28.
74 * To avoid this bug, just add a new parameter `just_enable` for
75 * the mxs_reset_block(), and rewrite it here.
77 static int gpmi_reset_block(void __iomem
*reset_addr
, bool just_enable
)
82 /* clear and poll SFTRST */
83 ret
= clear_poll_bit(reset_addr
, MODULE_SFTRST
);
88 writel(MODULE_CLKGATE
, reset_addr
+ MXS_CLR_ADDR
);
91 /* set SFTRST to reset the block */
92 writel(MODULE_SFTRST
, reset_addr
+ MXS_SET_ADDR
);
95 /* poll CLKGATE becoming set */
96 while ((!(readl(reset_addr
) & MODULE_CLKGATE
)) && --timeout
)
98 if (unlikely(!timeout
))
102 /* clear and poll SFTRST */
103 ret
= clear_poll_bit(reset_addr
, MODULE_SFTRST
);
107 /* clear and poll CLKGATE */
108 ret
= clear_poll_bit(reset_addr
, MODULE_CLKGATE
);
115 pr_err("%s(%p): module reset timeout\n", __func__
, reset_addr
);
119 static int __gpmi_enable_clk(struct gpmi_nand_data
*this, bool v
)
125 for (i
= 0; i
< GPMI_CLK_MAX
; i
++) {
126 clk
= this->resources
.clock
[i
];
131 ret
= clk_prepare_enable(clk
);
135 clk_disable_unprepare(clk
);
142 clk_disable_unprepare(this->resources
.clock
[i
- 1]);
146 static int gpmi_init(struct gpmi_nand_data
*this)
148 struct resources
*r
= &this->resources
;
151 ret
= pm_runtime_get_sync(this->dev
);
155 ret
= gpmi_reset_block(r
->gpmi_regs
, false);
160 * Reset BCH here, too. We got failures otherwise :(
161 * See later BCH reset for explanation of MX23 and MX28 handling
163 ret
= gpmi_reset_block(r
->bch_regs
, GPMI_IS_MXS(this));
167 /* Choose NAND mode. */
168 writel(BM_GPMI_CTRL1_GPMI_MODE
, r
->gpmi_regs
+ HW_GPMI_CTRL1_CLR
);
170 /* Set the IRQ polarity. */
171 writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY
,
172 r
->gpmi_regs
+ HW_GPMI_CTRL1_SET
);
174 /* Disable Write-Protection. */
175 writel(BM_GPMI_CTRL1_DEV_RESET
, r
->gpmi_regs
+ HW_GPMI_CTRL1_SET
);
177 /* Select BCH ECC. */
178 writel(BM_GPMI_CTRL1_BCH_MODE
, r
->gpmi_regs
+ HW_GPMI_CTRL1_SET
);
181 * Decouple the chip select from dma channel. We use dma0 for all
184 writel(BM_GPMI_CTRL1_DECOUPLE_CS
, r
->gpmi_regs
+ HW_GPMI_CTRL1_SET
);
187 pm_runtime_mark_last_busy(this->dev
);
188 pm_runtime_put_autosuspend(this->dev
);
192 /* This function is very useful. It is called only when the bug occur. */
193 static void gpmi_dump_info(struct gpmi_nand_data
*this)
195 struct resources
*r
= &this->resources
;
196 struct bch_geometry
*geo
= &this->bch_geometry
;
200 dev_err(this->dev
, "Show GPMI registers :\n");
201 for (i
= 0; i
<= HW_GPMI_DEBUG
/ 0x10 + 1; i
++) {
202 reg
= readl(r
->gpmi_regs
+ i
* 0x10);
203 dev_err(this->dev
, "offset 0x%.3x : 0x%.8x\n", i
* 0x10, reg
);
206 /* start to print out the BCH info */
207 dev_err(this->dev
, "Show BCH registers :\n");
208 for (i
= 0; i
<= HW_BCH_VERSION
/ 0x10 + 1; i
++) {
209 reg
= readl(r
->bch_regs
+ i
* 0x10);
210 dev_err(this->dev
, "offset 0x%.3x : 0x%.8x\n", i
* 0x10, reg
);
212 dev_err(this->dev
, "BCH Geometry :\n"
214 "ECC Strength : %u\n"
215 "Page Size in Bytes : %u\n"
216 "Metadata Size in Bytes : %u\n"
217 "ECC Chunk Size in Bytes: %u\n"
218 "ECC Chunk Count : %u\n"
219 "Payload Size in Bytes : %u\n"
220 "Auxiliary Size in Bytes: %u\n"
221 "Auxiliary Status Offset: %u\n"
222 "Block Mark Byte Offset : %u\n"
223 "Block Mark Bit Offset : %u\n",
229 geo
->ecc_chunk_count
,
232 geo
->auxiliary_status_offset
,
233 geo
->block_mark_byte_offset
,
234 geo
->block_mark_bit_offset
);
237 static inline bool gpmi_check_ecc(struct gpmi_nand_data
*this)
239 struct bch_geometry
*geo
= &this->bch_geometry
;
241 /* Do the sanity check. */
242 if (GPMI_IS_MXS(this)) {
243 /* The mx23/mx28 only support the GF13. */
244 if (geo
->gf_len
== 14)
247 return geo
->ecc_strength
<= this->devdata
->bch_max_ecc_strength
;
251 * If we can get the ECC information from the nand chip, we do not
252 * need to calculate them ourselves.
254 * We may have available oob space in this case.
256 static int set_geometry_by_ecc_info(struct gpmi_nand_data
*this,
257 unsigned int ecc_strength
,
258 unsigned int ecc_step
)
260 struct bch_geometry
*geo
= &this->bch_geometry
;
261 struct nand_chip
*chip
= &this->nand
;
262 struct mtd_info
*mtd
= nand_to_mtd(chip
);
263 unsigned int block_mark_bit_offset
;
274 "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
275 chip
->base
.eccreq
.strength
,
276 chip
->base
.eccreq
.step_size
);
279 geo
->ecc_chunk_size
= ecc_step
;
280 geo
->ecc_strength
= round_up(ecc_strength
, 2);
281 if (!gpmi_check_ecc(this))
284 /* Keep the C >= O */
285 if (geo
->ecc_chunk_size
< mtd
->oobsize
) {
287 "unsupported nand chip. ecc size: %d, oob size : %d\n",
288 ecc_step
, mtd
->oobsize
);
292 /* The default value, see comment in the legacy_set_geometry(). */
293 geo
->metadata_size
= 10;
295 geo
->ecc_chunk_count
= mtd
->writesize
/ geo
->ecc_chunk_size
;
298 * Now, the NAND chip with 2K page(data chunk is 512byte) shows below:
301 * |<----------------------------------------------------->|
305 * |<-------------------------------------------->| D | | O' |
308 * +---+----------+-+----------+-+----------+-+----------+-+-----+
309 * | M | data |E| data |E| data |E| data |E| |
310 * +---+----------+-+----------+-+----------+-+----------+-+-----+
316 * P : the page size for BCH module.
317 * E : The ECC strength.
318 * G : the length of Galois Field.
319 * N : The chunk count of per page.
320 * M : the metasize of per page.
321 * C : the ecc chunk size, aka the "data" above.
322 * P': the nand chip's page size.
323 * O : the nand chip's oob size.
326 * The formula for P is :
329 * P = ------------ + P' + M
332 * The position of block mark moves forward in the ECC-based view
333 * of page, and the delta is:
336 * D = (---------------- + M)
339 * Please see the comment in legacy_set_geometry().
340 * With the condition C >= O , we still can get same result.
341 * So the bit position of the physical block mark within the ECC-based
342 * view of the page is :
345 geo
->page_size
= mtd
->writesize
+ geo
->metadata_size
+
346 (geo
->gf_len
* geo
->ecc_strength
* geo
->ecc_chunk_count
) / 8;
348 geo
->payload_size
= mtd
->writesize
;
350 geo
->auxiliary_status_offset
= ALIGN(geo
->metadata_size
, 4);
351 geo
->auxiliary_size
= ALIGN(geo
->metadata_size
, 4)
352 + ALIGN(geo
->ecc_chunk_count
, 4);
354 if (!this->swap_block_mark
)
358 block_mark_bit_offset
= mtd
->writesize
* 8 -
359 (geo
->ecc_strength
* geo
->gf_len
* (geo
->ecc_chunk_count
- 1)
360 + geo
->metadata_size
* 8);
362 geo
->block_mark_byte_offset
= block_mark_bit_offset
/ 8;
363 geo
->block_mark_bit_offset
= block_mark_bit_offset
% 8;
368 * Calculate the ECC strength by hand:
369 * E : The ECC strength.
370 * G : the length of Galois Field.
371 * N : The chunk count of per page.
372 * O : the oobsize of the NAND chip.
373 * M : the metasize of per page.
377 * ------------ <= (O - M)
385 static inline int get_ecc_strength(struct gpmi_nand_data
*this)
387 struct bch_geometry
*geo
= &this->bch_geometry
;
388 struct mtd_info
*mtd
= nand_to_mtd(&this->nand
);
391 ecc_strength
= ((mtd
->oobsize
- geo
->metadata_size
) * 8)
392 / (geo
->gf_len
* geo
->ecc_chunk_count
);
394 /* We need the minor even number. */
395 return round_down(ecc_strength
, 2);
398 static int legacy_set_geometry(struct gpmi_nand_data
*this)
400 struct bch_geometry
*geo
= &this->bch_geometry
;
401 struct mtd_info
*mtd
= nand_to_mtd(&this->nand
);
402 unsigned int metadata_size
;
403 unsigned int status_size
;
404 unsigned int block_mark_bit_offset
;
407 * The size of the metadata can be changed, though we set it to 10
408 * bytes now. But it can't be too large, because we have to save
409 * enough space for BCH.
411 geo
->metadata_size
= 10;
413 /* The default for the length of Galois Field. */
416 /* The default for chunk size. */
417 geo
->ecc_chunk_size
= 512;
418 while (geo
->ecc_chunk_size
< mtd
->oobsize
) {
419 geo
->ecc_chunk_size
*= 2; /* keep C >= O */
423 geo
->ecc_chunk_count
= mtd
->writesize
/ geo
->ecc_chunk_size
;
425 /* We use the same ECC strength for all chunks. */
426 geo
->ecc_strength
= get_ecc_strength(this);
427 if (!gpmi_check_ecc(this)) {
429 "ecc strength: %d cannot be supported by the controller (%d)\n"
430 "try to use minimum ecc strength that NAND chip required\n",
432 this->devdata
->bch_max_ecc_strength
);
436 geo
->page_size
= mtd
->writesize
+ geo
->metadata_size
+
437 (geo
->gf_len
* geo
->ecc_strength
* geo
->ecc_chunk_count
) / 8;
438 geo
->payload_size
= mtd
->writesize
;
441 * The auxiliary buffer contains the metadata and the ECC status. The
442 * metadata is padded to the nearest 32-bit boundary. The ECC status
443 * contains one byte for every ECC chunk, and is also padded to the
444 * nearest 32-bit boundary.
446 metadata_size
= ALIGN(geo
->metadata_size
, 4);
447 status_size
= ALIGN(geo
->ecc_chunk_count
, 4);
449 geo
->auxiliary_size
= metadata_size
+ status_size
;
450 geo
->auxiliary_status_offset
= metadata_size
;
452 if (!this->swap_block_mark
)
456 * We need to compute the byte and bit offsets of
457 * the physical block mark within the ECC-based view of the page.
459 * NAND chip with 2K page shows below:
465 * +---+----------+-+----------+-+----------+-+----------+-+
466 * | M | data |E| data |E| data |E| data |E|
467 * +---+----------+-+----------+-+----------+-+----------+-+
469 * The position of block mark moves forward in the ECC-based view
470 * of page, and the delta is:
473 * D = (---------------- + M)
476 * With the formula to compute the ECC strength, and the condition
477 * : C >= O (C is the ecc chunk size)
479 * It's easy to deduce to the following result:
481 * E * G (O - M) C - M C - M
482 * ----------- <= ------- <= -------- < ---------
488 * D = (---------------- + M) < C
491 * The above inequality means the position of block mark
492 * within the ECC-based view of the page is still in the data chunk,
493 * and it's NOT in the ECC bits of the chunk.
495 * Use the following to compute the bit position of the
496 * physical block mark within the ECC-based view of the page:
497 * (page_size - D) * 8
501 block_mark_bit_offset
= mtd
->writesize
* 8 -
502 (geo
->ecc_strength
* geo
->gf_len
* (geo
->ecc_chunk_count
- 1)
503 + geo
->metadata_size
* 8);
505 geo
->block_mark_byte_offset
= block_mark_bit_offset
/ 8;
506 geo
->block_mark_bit_offset
= block_mark_bit_offset
% 8;
510 static int common_nfc_set_geometry(struct gpmi_nand_data
*this)
512 struct nand_chip
*chip
= &this->nand
;
514 if (chip
->ecc
.strength
> 0 && chip
->ecc
.size
> 0)
515 return set_geometry_by_ecc_info(this, chip
->ecc
.strength
,
518 if ((of_property_read_bool(this->dev
->of_node
, "fsl,use-minimum-ecc"))
519 || legacy_set_geometry(this)) {
520 if (!(chip
->base
.eccreq
.strength
> 0 &&
521 chip
->base
.eccreq
.step_size
> 0))
524 return set_geometry_by_ecc_info(this,
525 chip
->base
.eccreq
.strength
,
526 chip
->base
.eccreq
.step_size
);
532 /* Configures the geometry for BCH. */
533 static int bch_set_geometry(struct gpmi_nand_data
*this)
535 struct resources
*r
= &this->resources
;
538 ret
= common_nfc_set_geometry(this);
542 ret
= pm_runtime_get_sync(this->dev
);
547 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
548 * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
551 ret
= gpmi_reset_block(r
->bch_regs
, GPMI_IS_MXS(this));
555 /* Set *all* chip selects to use layout 0. */
556 writel(0, r
->bch_regs
+ HW_BCH_LAYOUTSELECT
);
560 pm_runtime_mark_last_busy(this->dev
);
561 pm_runtime_put_autosuspend(this->dev
);
567 * <1> Firstly, we should know what's the GPMI-clock means.
568 * The GPMI-clock is the internal clock in the gpmi nand controller.
569 * If you set 100MHz to gpmi nand controller, the GPMI-clock's period
570 * is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
572 * <2> Secondly, we should know what's the frequency on the nand chip pins.
573 * The frequency on the nand chip pins is derived from the GPMI-clock.
574 * We can get it from the following equation:
578 * F : the frequency on the nand chip pins.
579 * G : the GPMI clock, such as 100MHz.
580 * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
581 * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
583 * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz,
584 * the nand EDO(extended Data Out) timing could be applied.
585 * The GPMI implements a feedback read strobe to sample the read data.
586 * The feedback read strobe can be delayed to support the nand EDO timing
587 * where the read strobe may deasserts before the read data is valid, and
588 * read data is valid for some time after read strobe.
590 * The following figure illustrates some aspects of a NAND Flash read:
597 * __ ___|__________________________________
601 * Read Data --------------< >---------
605 * FeedbackRDN ________ ____________
608 * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
611 * <4> Now, we begin to describe how to compute the right RDN_DELAY.
613 * 4.1) From the aspect of the nand chip pins:
614 * Delay = (tREA + C - tRP) {1}
616 * tREA : the maximum read access time.
617 * C : a constant to adjust the delay. default is 4000ps.
618 * tRP : the read pulse width, which is exactly:
619 * tRP = (GPMI-clock-period) * DATA_SETUP
621 * 4.2) From the aspect of the GPMI nand controller:
622 * Delay = RDN_DELAY * 0.125 * RP {2}
624 * RP : the DLL reference period.
625 * if (GPMI-clock-period > DLL_THRETHOLD)
626 * RP = GPMI-clock-period / 2;
628 * RP = GPMI-clock-period;
630 * Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
631 * is greater DLL_THRETHOLD. In other SOCs, the DLL_THRETHOLD
632 * is 16000ps, but in mx6q, we use 12000ps.
634 * 4.3) since {1} equals {2}, we get:
636 * (tREA + 4000 - tRP) * 8
637 * RDN_DELAY = ----------------------- {3}
640 static void gpmi_nfc_compute_timings(struct gpmi_nand_data
*this,
641 const struct nand_sdr_timings
*sdr
)
643 struct gpmi_nfc_hardware_timing
*hw
= &this->hw
;
644 unsigned int dll_threshold_ps
= this->devdata
->max_chain_delay
;
645 unsigned int period_ps
, reference_period_ps
;
646 unsigned int data_setup_cycles
, data_hold_cycles
, addr_setup_cycles
;
648 bool use_half_period
;
649 int sample_delay_ps
, sample_delay_factor
;
650 u16 busy_timeout_cycles
;
653 if (sdr
->tRC_min
>= 30000) {
654 /* ONFI non-EDO modes [0-3] */
655 hw
->clk_rate
= 22000000;
656 wrn_dly_sel
= BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS
;
657 } else if (sdr
->tRC_min
>= 25000) {
658 /* ONFI EDO mode 4 */
659 hw
->clk_rate
= 80000000;
660 wrn_dly_sel
= BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY
;
662 /* ONFI EDO mode 5 */
663 hw
->clk_rate
= 100000000;
664 wrn_dly_sel
= BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY
;
667 /* SDR core timings are given in picoseconds */
668 period_ps
= div_u64((u64
)NSEC_PER_SEC
* 1000, hw
->clk_rate
);
670 addr_setup_cycles
= TO_CYCLES(sdr
->tALS_min
, period_ps
);
671 data_setup_cycles
= TO_CYCLES(sdr
->tDS_min
, period_ps
);
672 data_hold_cycles
= TO_CYCLES(sdr
->tDH_min
, period_ps
);
673 busy_timeout_cycles
= TO_CYCLES(sdr
->tWB_max
+ sdr
->tR_max
, period_ps
);
675 hw
->timing0
= BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles
) |
676 BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles
) |
677 BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles
);
678 hw
->timing1
= BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles
* 4096);
681 * Derive NFC ideal delay from {3}:
683 * (tREA + 4000 - tRP) * 8
684 * RDN_DELAY = -----------------------
687 if (period_ps
> dll_threshold_ps
) {
688 use_half_period
= true;
689 reference_period_ps
= period_ps
/ 2;
691 use_half_period
= false;
692 reference_period_ps
= period_ps
;
695 tRP_ps
= data_setup_cycles
* period_ps
;
696 sample_delay_ps
= (sdr
->tREA_max
+ 4000 - tRP_ps
) * 8;
697 if (sample_delay_ps
> 0)
698 sample_delay_factor
= sample_delay_ps
/ reference_period_ps
;
700 sample_delay_factor
= 0;
702 hw
->ctrl1n
= BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel
);
703 if (sample_delay_factor
)
704 hw
->ctrl1n
|= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor
) |
705 BM_GPMI_CTRL1_DLL_ENABLE
|
706 (use_half_period
? BM_GPMI_CTRL1_HALF_PERIOD
: 0);
709 static void gpmi_nfc_apply_timings(struct gpmi_nand_data
*this)
711 struct gpmi_nfc_hardware_timing
*hw
= &this->hw
;
712 struct resources
*r
= &this->resources
;
713 void __iomem
*gpmi_regs
= r
->gpmi_regs
;
714 unsigned int dll_wait_time_us
;
716 clk_set_rate(r
->clock
[0], hw
->clk_rate
);
718 writel(hw
->timing0
, gpmi_regs
+ HW_GPMI_TIMING0
);
719 writel(hw
->timing1
, gpmi_regs
+ HW_GPMI_TIMING1
);
722 * Clear several CTRL1 fields, DLL must be disabled when setting
723 * RDN_DELAY or HALF_PERIOD.
725 writel(BM_GPMI_CTRL1_CLEAR_MASK
, gpmi_regs
+ HW_GPMI_CTRL1_CLR
);
726 writel(hw
->ctrl1n
, gpmi_regs
+ HW_GPMI_CTRL1_SET
);
728 /* Wait 64 clock cycles before using the GPMI after enabling the DLL */
729 dll_wait_time_us
= USEC_PER_SEC
/ hw
->clk_rate
* 64;
730 if (!dll_wait_time_us
)
731 dll_wait_time_us
= 1;
733 /* Wait for the DLL to settle. */
734 udelay(dll_wait_time_us
);
737 static int gpmi_setup_data_interface(struct nand_chip
*chip
, int chipnr
,
738 const struct nand_data_interface
*conf
)
740 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
741 const struct nand_sdr_timings
*sdr
;
743 /* Retrieve required NAND timings */
744 sdr
= nand_get_sdr_timings(conf
);
748 /* Only MX6 GPMI controller can reach EDO timings */
749 if (sdr
->tRC_min
<= 25000 && !GPMI_IS_MX6(this))
752 /* Stop here if this call was just a check */
756 /* Do the actual derivation of the controller timings */
757 gpmi_nfc_compute_timings(this, sdr
);
759 this->hw
.must_apply_timings
= true;
764 /* Clears a BCH interrupt. */
765 static void gpmi_clear_bch(struct gpmi_nand_data
*this)
767 struct resources
*r
= &this->resources
;
768 writel(BM_BCH_CTRL_COMPLETE_IRQ
, r
->bch_regs
+ HW_BCH_CTRL_CLR
);
771 static struct dma_chan
*get_dma_chan(struct gpmi_nand_data
*this)
773 /* We use the DMA channel 0 to access all the nand chips. */
774 return this->dma_chans
[0];
777 /* This will be called after the DMA operation is finished. */
778 static void dma_irq_callback(void *param
)
780 struct gpmi_nand_data
*this = param
;
781 struct completion
*dma_c
= &this->dma_done
;
786 static irqreturn_t
bch_irq(int irq
, void *cookie
)
788 struct gpmi_nand_data
*this = cookie
;
790 gpmi_clear_bch(this);
791 complete(&this->bch_done
);
795 static int gpmi_raw_len_to_len(struct gpmi_nand_data
*this, int raw_len
)
798 * raw_len is the length to read/write including bch data which
799 * we are passed in exec_op. Calculate the data length from it.
802 return ALIGN_DOWN(raw_len
, this->bch_geometry
.ecc_chunk_size
);
807 /* Can we use the upper's buffer directly for DMA? */
808 static bool prepare_data_dma(struct gpmi_nand_data
*this, const void *buf
,
809 int raw_len
, struct scatterlist
*sgl
,
810 enum dma_data_direction dr
)
813 int len
= gpmi_raw_len_to_len(this, raw_len
);
815 /* first try to map the upper buffer directly */
816 if (virt_addr_valid(buf
) && !object_is_on_stack(buf
)) {
817 sg_init_one(sgl
, buf
, len
);
818 ret
= dma_map_sg(this->dev
, sgl
, 1, dr
);
826 /* We have to use our own DMA buffer. */
827 sg_init_one(sgl
, this->data_buffer_dma
, len
);
829 if (dr
== DMA_TO_DEVICE
&& buf
!= this->data_buffer_dma
)
830 memcpy(this->data_buffer_dma
, buf
, len
);
832 dma_map_sg(this->dev
, sgl
, 1, dr
);
838 * gpmi_copy_bits - copy bits from one memory region to another
839 * @dst: destination buffer
840 * @dst_bit_off: bit offset we're starting to write at
841 * @src: source buffer
842 * @src_bit_off: bit offset we're starting to read from
843 * @nbits: number of bits to copy
845 * This functions copies bits from one memory region to another, and is used by
846 * the GPMI driver to copy ECC sections which are not guaranteed to be byte
849 * src and dst should not overlap.
852 static void gpmi_copy_bits(u8
*dst
, size_t dst_bit_off
, const u8
*src
,
853 size_t src_bit_off
, size_t nbits
)
858 size_t bits_in_src_buffer
= 0;
864 * Move src and dst pointers to the closest byte pointer and store bit
865 * offsets within a byte.
867 src
+= src_bit_off
/ 8;
870 dst
+= dst_bit_off
/ 8;
874 * Initialize the src_buffer value with bits available in the first
875 * byte of data so that we end up with a byte aligned src pointer.
878 src_buffer
= src
[0] >> src_bit_off
;
879 if (nbits
>= (8 - src_bit_off
)) {
880 bits_in_src_buffer
+= 8 - src_bit_off
;
882 src_buffer
&= GENMASK(nbits
- 1, 0);
883 bits_in_src_buffer
+= nbits
;
885 nbits
-= bits_in_src_buffer
;
889 /* Calculate the number of bytes that can be copied from src to dst. */
892 /* Try to align dst to a byte boundary. */
894 if (bits_in_src_buffer
< (8 - dst_bit_off
) && nbytes
) {
895 src_buffer
|= src
[0] << bits_in_src_buffer
;
896 bits_in_src_buffer
+= 8;
901 if (bits_in_src_buffer
>= (8 - dst_bit_off
)) {
902 dst
[0] &= GENMASK(dst_bit_off
- 1, 0);
903 dst
[0] |= src_buffer
<< dst_bit_off
;
904 src_buffer
>>= (8 - dst_bit_off
);
905 bits_in_src_buffer
-= (8 - dst_bit_off
);
908 if (bits_in_src_buffer
> 7) {
909 bits_in_src_buffer
-= 8;
917 if (!bits_in_src_buffer
&& !dst_bit_off
) {
919 * Both src and dst pointers are byte aligned, thus we can
920 * just use the optimized memcpy function.
923 memcpy(dst
, src
, nbytes
);
926 * src buffer is not byte aligned, hence we have to copy each
927 * src byte to the src_buffer variable before extracting a byte
930 for (i
= 0; i
< nbytes
; i
++) {
931 src_buffer
|= src
[i
] << bits_in_src_buffer
;
936 /* Update dst and src pointers */
941 * nbits is the number of remaining bits. It should not exceed 8 as
942 * we've already copied as much bytes as possible.
947 * If there's no more bits to copy to the destination and src buffer
948 * was already byte aligned, then we're done.
950 if (!nbits
&& !bits_in_src_buffer
)
953 /* Copy the remaining bits to src_buffer */
955 src_buffer
|= (*src
& GENMASK(nbits
- 1, 0)) <<
957 bits_in_src_buffer
+= nbits
;
960 * In case there were not enough bits to get a byte aligned dst buffer
961 * prepare the src_buffer variable to match the dst organization (shift
962 * src_buffer by dst_bit_off and retrieve the least significant bits
966 src_buffer
= (src_buffer
<< dst_bit_off
) |
967 (*dst
& GENMASK(dst_bit_off
- 1, 0));
968 bits_in_src_buffer
+= dst_bit_off
;
971 * Keep most significant bits from dst if we end up with an unaligned
974 nbytes
= bits_in_src_buffer
/ 8;
975 if (bits_in_src_buffer
% 8) {
976 src_buffer
|= (dst
[nbytes
] &
977 GENMASK(7, bits_in_src_buffer
% 8)) <<
982 /* Copy the remaining bytes to dst */
983 for (i
= 0; i
< nbytes
; i
++) {
989 /* add our owner bbt descriptor */
990 static uint8_t scan_ff_pattern
[] = { 0xff };
991 static struct nand_bbt_descr gpmi_bbt_descr
= {
995 .pattern
= scan_ff_pattern
999 * We may change the layout if we can get the ECC info from the datasheet,
1000 * else we will use all the (page + OOB).
1002 static int gpmi_ooblayout_ecc(struct mtd_info
*mtd
, int section
,
1003 struct mtd_oob_region
*oobregion
)
1005 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1006 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
1007 struct bch_geometry
*geo
= &this->bch_geometry
;
1012 oobregion
->offset
= 0;
1013 oobregion
->length
= geo
->page_size
- mtd
->writesize
;
1018 static int gpmi_ooblayout_free(struct mtd_info
*mtd
, int section
,
1019 struct mtd_oob_region
*oobregion
)
1021 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1022 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
1023 struct bch_geometry
*geo
= &this->bch_geometry
;
1028 /* The available oob size we have. */
1029 if (geo
->page_size
< mtd
->writesize
+ mtd
->oobsize
) {
1030 oobregion
->offset
= geo
->page_size
- mtd
->writesize
;
1031 oobregion
->length
= mtd
->oobsize
- oobregion
->offset
;
1037 static const char * const gpmi_clks_for_mx2x
[] = {
1041 static const struct mtd_ooblayout_ops gpmi_ooblayout_ops
= {
1042 .ecc
= gpmi_ooblayout_ecc
,
1043 .free
= gpmi_ooblayout_free
,
1046 static const struct gpmi_devdata gpmi_devdata_imx23
= {
1048 .bch_max_ecc_strength
= 20,
1049 .max_chain_delay
= 16000,
1050 .clks
= gpmi_clks_for_mx2x
,
1051 .clks_count
= ARRAY_SIZE(gpmi_clks_for_mx2x
),
1054 static const struct gpmi_devdata gpmi_devdata_imx28
= {
1056 .bch_max_ecc_strength
= 20,
1057 .max_chain_delay
= 16000,
1058 .clks
= gpmi_clks_for_mx2x
,
1059 .clks_count
= ARRAY_SIZE(gpmi_clks_for_mx2x
),
1062 static const char * const gpmi_clks_for_mx6
[] = {
1063 "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
1066 static const struct gpmi_devdata gpmi_devdata_imx6q
= {
1068 .bch_max_ecc_strength
= 40,
1069 .max_chain_delay
= 12000,
1070 .clks
= gpmi_clks_for_mx6
,
1071 .clks_count
= ARRAY_SIZE(gpmi_clks_for_mx6
),
1074 static const struct gpmi_devdata gpmi_devdata_imx6sx
= {
1076 .bch_max_ecc_strength
= 62,
1077 .max_chain_delay
= 12000,
1078 .clks
= gpmi_clks_for_mx6
,
1079 .clks_count
= ARRAY_SIZE(gpmi_clks_for_mx6
),
1082 static const char * const gpmi_clks_for_mx7d
[] = {
1083 "gpmi_io", "gpmi_bch_apb",
1086 static const struct gpmi_devdata gpmi_devdata_imx7d
= {
1088 .bch_max_ecc_strength
= 62,
1089 .max_chain_delay
= 12000,
1090 .clks
= gpmi_clks_for_mx7d
,
1091 .clks_count
= ARRAY_SIZE(gpmi_clks_for_mx7d
),
1094 static int acquire_register_block(struct gpmi_nand_data
*this,
1095 const char *res_name
)
1097 struct platform_device
*pdev
= this->pdev
;
1098 struct resources
*res
= &this->resources
;
1102 r
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, res_name
);
1103 p
= devm_ioremap_resource(&pdev
->dev
, r
);
1107 if (!strcmp(res_name
, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME
))
1109 else if (!strcmp(res_name
, GPMI_NAND_BCH_REGS_ADDR_RES_NAME
))
1112 dev_err(this->dev
, "unknown resource name : %s\n", res_name
);
1117 static int acquire_bch_irq(struct gpmi_nand_data
*this, irq_handler_t irq_h
)
1119 struct platform_device
*pdev
= this->pdev
;
1120 const char *res_name
= GPMI_NAND_BCH_INTERRUPT_RES_NAME
;
1124 r
= platform_get_resource_byname(pdev
, IORESOURCE_IRQ
, res_name
);
1126 dev_err(this->dev
, "Can't get resource for %s\n", res_name
);
1130 err
= devm_request_irq(this->dev
, r
->start
, irq_h
, 0, res_name
, this);
1132 dev_err(this->dev
, "error requesting BCH IRQ\n");
1137 static void release_dma_channels(struct gpmi_nand_data
*this)
1140 for (i
= 0; i
< DMA_CHANS
; i
++)
1141 if (this->dma_chans
[i
]) {
1142 dma_release_channel(this->dma_chans
[i
]);
1143 this->dma_chans
[i
] = NULL
;
1147 static int acquire_dma_channels(struct gpmi_nand_data
*this)
1149 struct platform_device
*pdev
= this->pdev
;
1150 struct dma_chan
*dma_chan
;
1152 /* request dma channel */
1153 dma_chan
= dma_request_slave_channel(&pdev
->dev
, "rx-tx");
1155 dev_err(this->dev
, "Failed to request DMA channel.\n");
1159 this->dma_chans
[0] = dma_chan
;
1163 release_dma_channels(this);
1167 static int gpmi_get_clks(struct gpmi_nand_data
*this)
1169 struct resources
*r
= &this->resources
;
1173 for (i
= 0; i
< this->devdata
->clks_count
; i
++) {
1174 clk
= devm_clk_get(this->dev
, this->devdata
->clks
[i
]);
1183 if (GPMI_IS_MX6(this))
1185 * Set the default value for the gpmi clock.
1187 * If you want to use the ONFI nand which is in the
1188 * Synchronous Mode, you should change the clock as you need.
1190 clk_set_rate(r
->clock
[0], 22000000);
1195 dev_dbg(this->dev
, "failed in finding the clocks.\n");
1199 static int acquire_resources(struct gpmi_nand_data
*this)
1203 ret
= acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME
);
1207 ret
= acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME
);
1211 ret
= acquire_bch_irq(this, bch_irq
);
1215 ret
= acquire_dma_channels(this);
1219 ret
= gpmi_get_clks(this);
1225 release_dma_channels(this);
1230 static void release_resources(struct gpmi_nand_data
*this)
1232 release_dma_channels(this);
1235 static void gpmi_free_dma_buffer(struct gpmi_nand_data
*this)
1237 struct device
*dev
= this->dev
;
1238 struct bch_geometry
*geo
= &this->bch_geometry
;
1240 if (this->auxiliary_virt
&& virt_addr_valid(this->auxiliary_virt
))
1241 dma_free_coherent(dev
, geo
->auxiliary_size
,
1242 this->auxiliary_virt
,
1243 this->auxiliary_phys
);
1244 kfree(this->data_buffer_dma
);
1245 kfree(this->raw_buffer
);
1247 this->data_buffer_dma
= NULL
;
1248 this->raw_buffer
= NULL
;
1251 /* Allocate the DMA buffers */
1252 static int gpmi_alloc_dma_buffer(struct gpmi_nand_data
*this)
1254 struct bch_geometry
*geo
= &this->bch_geometry
;
1255 struct device
*dev
= this->dev
;
1256 struct mtd_info
*mtd
= nand_to_mtd(&this->nand
);
1259 * [2] Allocate a read/write data buffer.
1260 * The gpmi_alloc_dma_buffer can be called twice.
1261 * We allocate a PAGE_SIZE length buffer if gpmi_alloc_dma_buffer
1262 * is called before the NAND identification; and we allocate a
1263 * buffer of the real NAND page size when the gpmi_alloc_dma_buffer
1266 this->data_buffer_dma
= kzalloc(mtd
->writesize
?: PAGE_SIZE
,
1267 GFP_DMA
| GFP_KERNEL
);
1268 if (this->data_buffer_dma
== NULL
)
1271 this->auxiliary_virt
= dma_alloc_coherent(dev
, geo
->auxiliary_size
,
1272 &this->auxiliary_phys
, GFP_DMA
);
1273 if (!this->auxiliary_virt
)
1276 this->raw_buffer
= kzalloc((mtd
->writesize
?: PAGE_SIZE
) + mtd
->oobsize
, GFP_KERNEL
);
1277 if (!this->raw_buffer
)
1283 gpmi_free_dma_buffer(this);
1288 * Handles block mark swapping.
1289 * It can be called in swapping the block mark, or swapping it back,
1290 * because the the operations are the same.
1292 static void block_mark_swapping(struct gpmi_nand_data
*this,
1293 void *payload
, void *auxiliary
)
1295 struct bch_geometry
*nfc_geo
= &this->bch_geometry
;
1300 unsigned char from_data
;
1301 unsigned char from_oob
;
1303 if (!this->swap_block_mark
)
1307 * If control arrives here, we're swapping. Make some convenience
1310 bit
= nfc_geo
->block_mark_bit_offset
;
1311 p
= payload
+ nfc_geo
->block_mark_byte_offset
;
1315 * Get the byte from the data area that overlays the block mark. Since
1316 * the ECC engine applies its own view to the bits in the page, the
1317 * physical block mark won't (in general) appear on a byte boundary in
1320 from_data
= (p
[0] >> bit
) | (p
[1] << (8 - bit
));
1322 /* Get the byte from the OOB. */
1328 mask
= (0x1 << bit
) - 1;
1329 p
[0] = (p
[0] & mask
) | (from_oob
<< bit
);
1332 p
[1] = (p
[1] & mask
) | (from_oob
>> (8 - bit
));
1335 static int gpmi_count_bitflips(struct nand_chip
*chip
, void *buf
, int first
,
1338 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
1339 struct bch_geometry
*nfc_geo
= &this->bch_geometry
;
1340 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1342 unsigned char *status
;
1343 unsigned int max_bitflips
= 0;
1345 /* Loop over status bytes, accumulating ECC status. */
1346 status
= this->auxiliary_virt
+ ALIGN(meta
, 4);
1348 for (i
= first
; i
< last
; i
++, status
++) {
1349 if ((*status
== STATUS_GOOD
) || (*status
== STATUS_ERASED
))
1352 if (*status
== STATUS_UNCORRECTABLE
) {
1353 int eccbits
= nfc_geo
->ecc_strength
* nfc_geo
->gf_len
;
1354 u8
*eccbuf
= this->raw_buffer
;
1355 int offset
, bitoffset
;
1359 /* Read ECC bytes into our internal raw_buffer */
1360 offset
= nfc_geo
->metadata_size
* 8;
1361 offset
+= ((8 * nfc_geo
->ecc_chunk_size
) + eccbits
) * (i
+ 1);
1363 bitoffset
= offset
% 8;
1364 eccbytes
= DIV_ROUND_UP(offset
+ eccbits
, 8);
1367 nand_change_read_column_op(chip
, offset
, eccbuf
,
1371 * ECC data are not byte aligned and we may have
1372 * in-band data in the first and last byte of
1373 * eccbuf. Set non-eccbits to one so that
1374 * nand_check_erased_ecc_chunk() does not count them
1378 eccbuf
[0] |= GENMASK(bitoffset
- 1, 0);
1380 bitoffset
= (bitoffset
+ eccbits
) % 8;
1382 eccbuf
[eccbytes
- 1] |= GENMASK(7, bitoffset
);
1385 * The ECC hardware has an uncorrectable ECC status
1386 * code in case we have bitflips in an erased page. As
1387 * nothing was written into this subpage the ECC is
1388 * obviously wrong and we can not trust it. We assume
1389 * at this point that we are reading an erased page and
1390 * try to correct the bitflips in buffer up to
1391 * ecc_strength bitflips. If this is a page with random
1392 * data, we exceed this number of bitflips and have a
1393 * ECC failure. Otherwise we use the corrected buffer.
1396 /* The first block includes metadata */
1397 flips
= nand_check_erased_ecc_chunk(
1398 buf
+ i
* nfc_geo
->ecc_chunk_size
,
1399 nfc_geo
->ecc_chunk_size
,
1401 this->auxiliary_virt
,
1402 nfc_geo
->metadata_size
,
1403 nfc_geo
->ecc_strength
);
1405 flips
= nand_check_erased_ecc_chunk(
1406 buf
+ i
* nfc_geo
->ecc_chunk_size
,
1407 nfc_geo
->ecc_chunk_size
,
1410 nfc_geo
->ecc_strength
);
1414 max_bitflips
= max_t(unsigned int, max_bitflips
,
1416 mtd
->ecc_stats
.corrected
+= flips
;
1420 mtd
->ecc_stats
.failed
++;
1424 mtd
->ecc_stats
.corrected
+= *status
;
1425 max_bitflips
= max_t(unsigned int, max_bitflips
, *status
);
1428 return max_bitflips
;
1431 static void gpmi_bch_layout_std(struct gpmi_nand_data
*this)
1433 struct bch_geometry
*geo
= &this->bch_geometry
;
1434 unsigned int ecc_strength
= geo
->ecc_strength
>> 1;
1435 unsigned int gf_len
= geo
->gf_len
;
1436 unsigned int block_size
= geo
->ecc_chunk_size
;
1438 this->bch_flashlayout0
=
1439 BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo
->ecc_chunk_count
- 1) |
1440 BF_BCH_FLASH0LAYOUT0_META_SIZE(geo
->metadata_size
) |
1441 BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength
, this) |
1442 BF_BCH_FLASH0LAYOUT0_GF(gf_len
, this) |
1443 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size
, this);
1445 this->bch_flashlayout1
=
1446 BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo
->page_size
) |
1447 BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength
, this) |
1448 BF_BCH_FLASH0LAYOUT1_GF(gf_len
, this) |
1449 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size
, this);
1452 static int gpmi_ecc_read_page(struct nand_chip
*chip
, uint8_t *buf
,
1453 int oob_required
, int page
)
1455 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
1456 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1457 struct bch_geometry
*geo
= &this->bch_geometry
;
1458 unsigned int max_bitflips
;
1461 gpmi_bch_layout_std(this);
1464 ret
= nand_read_page_op(chip
, page
, 0, buf
, geo
->page_size
);
1468 max_bitflips
= gpmi_count_bitflips(chip
, buf
, 0,
1469 geo
->ecc_chunk_count
,
1470 geo
->auxiliary_status_offset
);
1472 /* handle the block mark swapping */
1473 block_mark_swapping(this, buf
, this->auxiliary_virt
);
1477 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
1478 * for details about our policy for delivering the OOB.
1480 * We fill the caller's buffer with set bits, and then copy the
1481 * block mark to th caller's buffer. Note that, if block mark
1482 * swapping was necessary, it has already been done, so we can
1483 * rely on the first byte of the auxiliary buffer to contain
1486 memset(chip
->oob_poi
, ~0, mtd
->oobsize
);
1487 chip
->oob_poi
[0] = ((uint8_t *)this->auxiliary_virt
)[0];
1490 return max_bitflips
;
1493 /* Fake a virtual small page for the subpage read */
1494 static int gpmi_ecc_read_subpage(struct nand_chip
*chip
, uint32_t offs
,
1495 uint32_t len
, uint8_t *buf
, int page
)
1497 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
1498 struct bch_geometry
*geo
= &this->bch_geometry
;
1499 int size
= chip
->ecc
.size
; /* ECC chunk size */
1500 int meta
, n
, page_size
;
1501 unsigned int max_bitflips
;
1502 unsigned int ecc_strength
;
1503 int first
, last
, marker_pos
;
1504 int ecc_parity_size
;
1508 /* The size of ECC parity */
1509 ecc_parity_size
= geo
->gf_len
* geo
->ecc_strength
/ 8;
1511 /* Align it with the chunk size */
1512 first
= offs
/ size
;
1513 last
= (offs
+ len
- 1) / size
;
1515 if (this->swap_block_mark
) {
1517 * Find the chunk which contains the Block Marker.
1518 * If this chunk is in the range of [first, last],
1519 * we have to read out the whole page.
1520 * Why? since we had swapped the data at the position of Block
1521 * Marker to the metadata which is bound with the chunk 0.
1523 marker_pos
= geo
->block_mark_byte_offset
/ size
;
1524 if (last
>= marker_pos
&& first
<= marker_pos
) {
1526 "page:%d, first:%d, last:%d, marker at:%d\n",
1527 page
, first
, last
, marker_pos
);
1528 return gpmi_ecc_read_page(chip
, buf
, 0, page
);
1532 meta
= geo
->metadata_size
;
1534 col
= meta
+ (size
+ ecc_parity_size
) * first
;
1536 buf
= buf
+ first
* size
;
1539 ecc_parity_size
= geo
->gf_len
* geo
->ecc_strength
/ 8;
1541 n
= last
- first
+ 1;
1542 page_size
= meta
+ (size
+ ecc_parity_size
) * n
;
1543 ecc_strength
= geo
->ecc_strength
>> 1;
1545 this->bch_flashlayout0
= BF_BCH_FLASH0LAYOUT0_NBLOCKS(n
- 1) |
1546 BF_BCH_FLASH0LAYOUT0_META_SIZE(meta
) |
1547 BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength
, this) |
1548 BF_BCH_FLASH0LAYOUT0_GF(geo
->gf_len
, this) |
1549 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(geo
->ecc_chunk_size
, this);
1551 this->bch_flashlayout1
= BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size
) |
1552 BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength
, this) |
1553 BF_BCH_FLASH0LAYOUT1_GF(geo
->gf_len
, this) |
1554 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo
->ecc_chunk_size
, this);
1558 ret
= nand_read_page_op(chip
, page
, col
, buf
, page_size
);
1562 dev_dbg(this->dev
, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
1563 page
, offs
, len
, col
, first
, n
, page_size
);
1565 max_bitflips
= gpmi_count_bitflips(chip
, buf
, first
, last
, meta
);
1567 return max_bitflips
;
1570 static int gpmi_ecc_write_page(struct nand_chip
*chip
, const uint8_t *buf
,
1571 int oob_required
, int page
)
1573 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1574 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
1575 struct bch_geometry
*nfc_geo
= &this->bch_geometry
;
1578 dev_dbg(this->dev
, "ecc write page.\n");
1580 gpmi_bch_layout_std(this);
1583 memcpy(this->auxiliary_virt
, chip
->oob_poi
, nfc_geo
->auxiliary_size
);
1585 if (this->swap_block_mark
) {
1587 * When doing bad block marker swapping we must always copy the
1588 * input buffer as we can't modify the const buffer.
1590 memcpy(this->data_buffer_dma
, buf
, mtd
->writesize
);
1591 buf
= this->data_buffer_dma
;
1592 block_mark_swapping(this, this->data_buffer_dma
,
1593 this->auxiliary_virt
);
1596 ret
= nand_prog_page_op(chip
, page
, 0, buf
, nfc_geo
->page_size
);
1602 * There are several places in this driver where we have to handle the OOB and
1603 * block marks. This is the function where things are the most complicated, so
1604 * this is where we try to explain it all. All the other places refer back to
1607 * These are the rules, in order of decreasing importance:
1609 * 1) Nothing the caller does can be allowed to imperil the block mark.
1611 * 2) In read operations, the first byte of the OOB we return must reflect the
1612 * true state of the block mark, no matter where that block mark appears in
1613 * the physical page.
1615 * 3) ECC-based read operations return an OOB full of set bits (since we never
1616 * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
1619 * 4) "Raw" read operations return a direct view of the physical bytes in the
1620 * page, using the conventional definition of which bytes are data and which
1621 * are OOB. This gives the caller a way to see the actual, physical bytes
1622 * in the page, without the distortions applied by our ECC engine.
1625 * What we do for this specific read operation depends on two questions:
1627 * 1) Are we doing a "raw" read, or an ECC-based read?
1629 * 2) Are we using block mark swapping or transcription?
1631 * There are four cases, illustrated by the following Karnaugh map:
1633 * | Raw | ECC-based |
1634 * -------------+-------------------------+-------------------------+
1635 * | Read the conventional | |
1636 * | OOB at the end of the | |
1637 * Swapping | page and return it. It | |
1638 * | contains exactly what | |
1639 * | we want. | Read the block mark and |
1640 * -------------+-------------------------+ return it in a buffer |
1641 * | Read the conventional | full of set bits. |
1642 * | OOB at the end of the | |
1643 * | page and also the block | |
1644 * Transcribing | mark in the metadata. | |
1645 * | Copy the block mark | |
1646 * | into the first byte of | |
1648 * -------------+-------------------------+-------------------------+
1650 * Note that we break rule #4 in the Transcribing/Raw case because we're not
1651 * giving an accurate view of the actual, physical bytes in the page (we're
1652 * overwriting the block mark). That's OK because it's more important to follow
1655 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
1656 * easy. When reading a page, for example, the NAND Flash MTD code calls our
1657 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
1658 * ECC-based or raw view of the page is implicit in which function it calls
1659 * (there is a similar pair of ECC-based/raw functions for writing).
1661 static int gpmi_ecc_read_oob(struct nand_chip
*chip
, int page
)
1663 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1664 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
1667 /* clear the OOB buffer */
1668 memset(chip
->oob_poi
, ~0, mtd
->oobsize
);
1670 /* Read out the conventional OOB. */
1671 ret
= nand_read_page_op(chip
, page
, mtd
->writesize
, chip
->oob_poi
,
1677 * Now, we want to make sure the block mark is correct. In the
1678 * non-transcribing case (!GPMI_IS_MX23()), we already have it.
1679 * Otherwise, we need to explicitly read it.
1681 if (GPMI_IS_MX23(this)) {
1682 /* Read the block mark into the first byte of the OOB buffer. */
1683 ret
= nand_read_page_op(chip
, page
, 0, chip
->oob_poi
, 1);
1691 static int gpmi_ecc_write_oob(struct nand_chip
*chip
, int page
)
1693 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1694 struct mtd_oob_region of
= { };
1696 /* Do we have available oob area? */
1697 mtd_ooblayout_free(mtd
, 0, &of
);
1701 if (!nand_is_slc(chip
))
1704 return nand_prog_page_op(chip
, page
, mtd
->writesize
+ of
.offset
,
1705 chip
->oob_poi
+ of
.offset
, of
.length
);
1709 * This function reads a NAND page without involving the ECC engine (no HW
1711 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
1712 * inline (interleaved with payload DATA), and do not align data chunk on
1714 * We thus need to take care moving the payload data and ECC bits stored in the
1715 * page into the provided buffers, which is why we're using gpmi_copy_bits.
1717 * See set_geometry_by_ecc_info inline comments to have a full description
1718 * of the layout used by the GPMI controller.
1720 static int gpmi_ecc_read_page_raw(struct nand_chip
*chip
, uint8_t *buf
,
1721 int oob_required
, int page
)
1723 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1724 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
1725 struct bch_geometry
*nfc_geo
= &this->bch_geometry
;
1726 int eccsize
= nfc_geo
->ecc_chunk_size
;
1727 int eccbits
= nfc_geo
->ecc_strength
* nfc_geo
->gf_len
;
1728 u8
*tmp_buf
= this->raw_buffer
;
1731 size_t oob_byte_off
;
1732 uint8_t *oob
= chip
->oob_poi
;
1736 ret
= nand_read_page_op(chip
, page
, 0, tmp_buf
,
1737 mtd
->writesize
+ mtd
->oobsize
);
1742 * If required, swap the bad block marker and the data stored in the
1743 * metadata section, so that we don't wrongly consider a block as bad.
1745 * See the layout description for a detailed explanation on why this
1748 if (this->swap_block_mark
)
1749 swap(tmp_buf
[0], tmp_buf
[mtd
->writesize
]);
1752 * Copy the metadata section into the oob buffer (this section is
1753 * guaranteed to be aligned on a byte boundary).
1756 memcpy(oob
, tmp_buf
, nfc_geo
->metadata_size
);
1758 oob_bit_off
= nfc_geo
->metadata_size
* 8;
1759 src_bit_off
= oob_bit_off
;
1761 /* Extract interleaved payload data and ECC bits */
1762 for (step
= 0; step
< nfc_geo
->ecc_chunk_count
; step
++) {
1764 gpmi_copy_bits(buf
, step
* eccsize
* 8,
1765 tmp_buf
, src_bit_off
,
1767 src_bit_off
+= eccsize
* 8;
1769 /* Align last ECC block to align a byte boundary */
1770 if (step
== nfc_geo
->ecc_chunk_count
- 1 &&
1771 (oob_bit_off
+ eccbits
) % 8)
1772 eccbits
+= 8 - ((oob_bit_off
+ eccbits
) % 8);
1775 gpmi_copy_bits(oob
, oob_bit_off
,
1776 tmp_buf
, src_bit_off
,
1779 src_bit_off
+= eccbits
;
1780 oob_bit_off
+= eccbits
;
1784 oob_byte_off
= oob_bit_off
/ 8;
1786 if (oob_byte_off
< mtd
->oobsize
)
1787 memcpy(oob
+ oob_byte_off
,
1788 tmp_buf
+ mtd
->writesize
+ oob_byte_off
,
1789 mtd
->oobsize
- oob_byte_off
);
1796 * This function writes a NAND page without involving the ECC engine (no HW
1798 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
1799 * inline (interleaved with payload DATA), and do not align data chunk on
1801 * We thus need to take care moving the OOB area at the right place in the
1802 * final page, which is why we're using gpmi_copy_bits.
1804 * See set_geometry_by_ecc_info inline comments to have a full description
1805 * of the layout used by the GPMI controller.
1807 static int gpmi_ecc_write_page_raw(struct nand_chip
*chip
, const uint8_t *buf
,
1808 int oob_required
, int page
)
1810 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1811 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
1812 struct bch_geometry
*nfc_geo
= &this->bch_geometry
;
1813 int eccsize
= nfc_geo
->ecc_chunk_size
;
1814 int eccbits
= nfc_geo
->ecc_strength
* nfc_geo
->gf_len
;
1815 u8
*tmp_buf
= this->raw_buffer
;
1816 uint8_t *oob
= chip
->oob_poi
;
1819 size_t oob_byte_off
;
1823 * Initialize all bits to 1 in case we don't have a buffer for the
1824 * payload or oob data in order to leave unspecified bits of data
1825 * to their initial state.
1827 if (!buf
|| !oob_required
)
1828 memset(tmp_buf
, 0xff, mtd
->writesize
+ mtd
->oobsize
);
1831 * First copy the metadata section (stored in oob buffer) at the
1832 * beginning of the page, as imposed by the GPMI layout.
1834 memcpy(tmp_buf
, oob
, nfc_geo
->metadata_size
);
1835 oob_bit_off
= nfc_geo
->metadata_size
* 8;
1836 dst_bit_off
= oob_bit_off
;
1838 /* Interleave payload data and ECC bits */
1839 for (step
= 0; step
< nfc_geo
->ecc_chunk_count
; step
++) {
1841 gpmi_copy_bits(tmp_buf
, dst_bit_off
,
1842 buf
, step
* eccsize
* 8, eccsize
* 8);
1843 dst_bit_off
+= eccsize
* 8;
1845 /* Align last ECC block to align a byte boundary */
1846 if (step
== nfc_geo
->ecc_chunk_count
- 1 &&
1847 (oob_bit_off
+ eccbits
) % 8)
1848 eccbits
+= 8 - ((oob_bit_off
+ eccbits
) % 8);
1851 gpmi_copy_bits(tmp_buf
, dst_bit_off
,
1852 oob
, oob_bit_off
, eccbits
);
1854 dst_bit_off
+= eccbits
;
1855 oob_bit_off
+= eccbits
;
1858 oob_byte_off
= oob_bit_off
/ 8;
1860 if (oob_required
&& oob_byte_off
< mtd
->oobsize
)
1861 memcpy(tmp_buf
+ mtd
->writesize
+ oob_byte_off
,
1862 oob
+ oob_byte_off
, mtd
->oobsize
- oob_byte_off
);
1865 * If required, swap the bad block marker and the first byte of the
1866 * metadata section, so that we don't modify the bad block marker.
1868 * See the layout description for a detailed explanation on why this
1871 if (this->swap_block_mark
)
1872 swap(tmp_buf
[0], tmp_buf
[mtd
->writesize
]);
1874 return nand_prog_page_op(chip
, page
, 0, tmp_buf
,
1875 mtd
->writesize
+ mtd
->oobsize
);
1878 static int gpmi_ecc_read_oob_raw(struct nand_chip
*chip
, int page
)
1880 return gpmi_ecc_read_page_raw(chip
, NULL
, 1, page
);
1883 static int gpmi_ecc_write_oob_raw(struct nand_chip
*chip
, int page
)
1885 return gpmi_ecc_write_page_raw(chip
, NULL
, 1, page
);
1888 static int gpmi_block_markbad(struct nand_chip
*chip
, loff_t ofs
)
1890 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1891 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
1893 uint8_t *block_mark
;
1894 int column
, page
, chipnr
;
1896 chipnr
= (int)(ofs
>> chip
->chip_shift
);
1897 nand_select_target(chip
, chipnr
);
1899 column
= !GPMI_IS_MX23(this) ? mtd
->writesize
: 0;
1901 /* Write the block mark. */
1902 block_mark
= this->data_buffer_dma
;
1903 block_mark
[0] = 0; /* bad block marker */
1905 /* Shift to get page */
1906 page
= (int)(ofs
>> chip
->page_shift
);
1908 ret
= nand_prog_page_op(chip
, page
, column
, block_mark
, 1);
1910 nand_deselect_target(chip
);
1915 static int nand_boot_set_geometry(struct gpmi_nand_data
*this)
1917 struct boot_rom_geometry
*geometry
= &this->rom_geometry
;
1920 * Set the boot block stride size.
1922 * In principle, we should be reading this from the OTP bits, since
1923 * that's where the ROM is going to get it. In fact, we don't have any
1924 * way to read the OTP bits, so we go with the default and hope for the
1927 geometry
->stride_size_in_pages
= 64;
1930 * Set the search area stride exponent.
1932 * In principle, we should be reading this from the OTP bits, since
1933 * that's where the ROM is going to get it. In fact, we don't have any
1934 * way to read the OTP bits, so we go with the default and hope for the
1937 geometry
->search_area_stride_exponent
= 2;
1941 static const char *fingerprint
= "STMP";
1942 static int mx23_check_transcription_stamp(struct gpmi_nand_data
*this)
1944 struct boot_rom_geometry
*rom_geo
= &this->rom_geometry
;
1945 struct device
*dev
= this->dev
;
1946 struct nand_chip
*chip
= &this->nand
;
1947 unsigned int search_area_size_in_strides
;
1948 unsigned int stride
;
1950 u8
*buffer
= nand_get_data_buf(chip
);
1951 int found_an_ncb_fingerprint
= false;
1954 /* Compute the number of strides in a search area. */
1955 search_area_size_in_strides
= 1 << rom_geo
->search_area_stride_exponent
;
1957 nand_select_target(chip
, 0);
1960 * Loop through the first search area, looking for the NCB fingerprint.
1962 dev_dbg(dev
, "Scanning for an NCB fingerprint...\n");
1964 for (stride
= 0; stride
< search_area_size_in_strides
; stride
++) {
1965 /* Compute the page addresses. */
1966 page
= stride
* rom_geo
->stride_size_in_pages
;
1968 dev_dbg(dev
, "Looking for a fingerprint in page 0x%x\n", page
);
1971 * Read the NCB fingerprint. The fingerprint is four bytes long
1972 * and starts in the 12th byte of the page.
1974 ret
= nand_read_page_op(chip
, page
, 12, buffer
,
1975 strlen(fingerprint
));
1979 /* Look for the fingerprint. */
1980 if (!memcmp(buffer
, fingerprint
, strlen(fingerprint
))) {
1981 found_an_ncb_fingerprint
= true;
1987 nand_deselect_target(chip
);
1989 if (found_an_ncb_fingerprint
)
1990 dev_dbg(dev
, "\tFound a fingerprint\n");
1992 dev_dbg(dev
, "\tNo fingerprint found\n");
1993 return found_an_ncb_fingerprint
;
1996 /* Writes a transcription stamp. */
1997 static int mx23_write_transcription_stamp(struct gpmi_nand_data
*this)
1999 struct device
*dev
= this->dev
;
2000 struct boot_rom_geometry
*rom_geo
= &this->rom_geometry
;
2001 struct nand_chip
*chip
= &this->nand
;
2002 struct mtd_info
*mtd
= nand_to_mtd(chip
);
2003 unsigned int block_size_in_pages
;
2004 unsigned int search_area_size_in_strides
;
2005 unsigned int search_area_size_in_pages
;
2006 unsigned int search_area_size_in_blocks
;
2008 unsigned int stride
;
2010 u8
*buffer
= nand_get_data_buf(chip
);
2013 /* Compute the search area geometry. */
2014 block_size_in_pages
= mtd
->erasesize
/ mtd
->writesize
;
2015 search_area_size_in_strides
= 1 << rom_geo
->search_area_stride_exponent
;
2016 search_area_size_in_pages
= search_area_size_in_strides
*
2017 rom_geo
->stride_size_in_pages
;
2018 search_area_size_in_blocks
=
2019 (search_area_size_in_pages
+ (block_size_in_pages
- 1)) /
2020 block_size_in_pages
;
2022 dev_dbg(dev
, "Search Area Geometry :\n");
2023 dev_dbg(dev
, "\tin Blocks : %u\n", search_area_size_in_blocks
);
2024 dev_dbg(dev
, "\tin Strides: %u\n", search_area_size_in_strides
);
2025 dev_dbg(dev
, "\tin Pages : %u\n", search_area_size_in_pages
);
2027 nand_select_target(chip
, 0);
2029 /* Loop over blocks in the first search area, erasing them. */
2030 dev_dbg(dev
, "Erasing the search area...\n");
2032 for (block
= 0; block
< search_area_size_in_blocks
; block
++) {
2033 /* Erase this block. */
2034 dev_dbg(dev
, "\tErasing block 0x%x\n", block
);
2035 status
= nand_erase_op(chip
, block
);
2037 dev_err(dev
, "[%s] Erase failed.\n", __func__
);
2040 /* Write the NCB fingerprint into the page buffer. */
2041 memset(buffer
, ~0, mtd
->writesize
);
2042 memcpy(buffer
+ 12, fingerprint
, strlen(fingerprint
));
2044 /* Loop through the first search area, writing NCB fingerprints. */
2045 dev_dbg(dev
, "Writing NCB fingerprints...\n");
2046 for (stride
= 0; stride
< search_area_size_in_strides
; stride
++) {
2047 /* Compute the page addresses. */
2048 page
= stride
* rom_geo
->stride_size_in_pages
;
2050 /* Write the first page of the current stride. */
2051 dev_dbg(dev
, "Writing an NCB fingerprint in page 0x%x\n", page
);
2053 status
= chip
->ecc
.write_page_raw(chip
, buffer
, 0, page
);
2055 dev_err(dev
, "[%s] Write failed.\n", __func__
);
2058 nand_deselect_target(chip
);
2063 static int mx23_boot_init(struct gpmi_nand_data
*this)
2065 struct device
*dev
= this->dev
;
2066 struct nand_chip
*chip
= &this->nand
;
2067 struct mtd_info
*mtd
= nand_to_mtd(chip
);
2068 unsigned int block_count
;
2077 * If control arrives here, we can't use block mark swapping, which
2078 * means we're forced to use transcription. First, scan for the
2079 * transcription stamp. If we find it, then we don't have to do
2080 * anything -- the block marks are already transcribed.
2082 if (mx23_check_transcription_stamp(this))
2086 * If control arrives here, we couldn't find a transcription stamp, so
2087 * so we presume the block marks are in the conventional location.
2089 dev_dbg(dev
, "Transcribing bad block marks...\n");
2091 /* Compute the number of blocks in the entire medium. */
2092 block_count
= nanddev_eraseblocks_per_target(&chip
->base
);
2095 * Loop over all the blocks in the medium, transcribing block marks as
2098 for (block
= 0; block
< block_count
; block
++) {
2100 * Compute the chip, page and byte addresses for this block's
2101 * conventional mark.
2103 chipnr
= block
>> (chip
->chip_shift
- chip
->phys_erase_shift
);
2104 page
= block
<< (chip
->phys_erase_shift
- chip
->page_shift
);
2105 byte
= block
<< chip
->phys_erase_shift
;
2107 /* Send the command to read the conventional block mark. */
2108 nand_select_target(chip
, chipnr
);
2109 ret
= nand_read_page_op(chip
, page
, mtd
->writesize
, &block_mark
,
2111 nand_deselect_target(chip
);
2117 * Check if the block is marked bad. If so, we need to mark it
2118 * again, but this time the result will be a mark in the
2119 * location where we transcribe block marks.
2121 if (block_mark
!= 0xff) {
2122 dev_dbg(dev
, "Transcribing mark in block %u\n", block
);
2123 ret
= chip
->legacy
.block_markbad(chip
, byte
);
2126 "Failed to mark block bad with ret %d\n",
2131 /* Write the stamp that indicates we've transcribed the block marks. */
2132 mx23_write_transcription_stamp(this);
static int nand_boot_init(struct gpmi_nand_data *this)
{
	nand_boot_set_geometry(this);

	/* This is ROM arch-specific initialization before the BBT scanning. */
	if (GPMI_IS_MX23(this))
		return mx23_boot_init(this);
	return 0;
}
2146 static int gpmi_set_geometry(struct gpmi_nand_data
*this)
2150 /* Free the temporary DMA memory for reading ID. */
2151 gpmi_free_dma_buffer(this);
2153 /* Set up the NFC geometry which is used by BCH. */
2154 ret
= bch_set_geometry(this);
2156 dev_err(this->dev
, "Error setting BCH geometry : %d\n", ret
);
2160 /* Alloc the new DMA buffers according to the pagesize and oobsize */
2161 return gpmi_alloc_dma_buffer(this);
2164 static int gpmi_init_last(struct gpmi_nand_data
*this)
2166 struct nand_chip
*chip
= &this->nand
;
2167 struct mtd_info
*mtd
= nand_to_mtd(chip
);
2168 struct nand_ecc_ctrl
*ecc
= &chip
->ecc
;
2169 struct bch_geometry
*bch_geo
= &this->bch_geometry
;
2172 /* Set up the medium geometry */
2173 ret
= gpmi_set_geometry(this);
2177 /* Init the nand_ecc_ctrl{} */
2178 ecc
->read_page
= gpmi_ecc_read_page
;
2179 ecc
->write_page
= gpmi_ecc_write_page
;
2180 ecc
->read_oob
= gpmi_ecc_read_oob
;
2181 ecc
->write_oob
= gpmi_ecc_write_oob
;
2182 ecc
->read_page_raw
= gpmi_ecc_read_page_raw
;
2183 ecc
->write_page_raw
= gpmi_ecc_write_page_raw
;
2184 ecc
->read_oob_raw
= gpmi_ecc_read_oob_raw
;
2185 ecc
->write_oob_raw
= gpmi_ecc_write_oob_raw
;
2186 ecc
->mode
= NAND_ECC_HW
;
2187 ecc
->size
= bch_geo
->ecc_chunk_size
;
2188 ecc
->strength
= bch_geo
->ecc_strength
;
2189 mtd_set_ooblayout(mtd
, &gpmi_ooblayout_ops
);
2192 * We only enable the subpage read when:
2193 * (1) the chip is imx6, and
2194 * (2) the size of the ECC parity is byte aligned.
2196 if (GPMI_IS_MX6(this) &&
2197 ((bch_geo
->gf_len
* bch_geo
->ecc_strength
) % 8) == 0) {
2198 ecc
->read_subpage
= gpmi_ecc_read_subpage
;
2199 chip
->options
|= NAND_SUBPAGE_READ
;
2205 static int gpmi_nand_attach_chip(struct nand_chip
*chip
)
2207 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
2210 if (chip
->bbt_options
& NAND_BBT_USE_FLASH
) {
2211 chip
->bbt_options
|= NAND_BBT_NO_OOB
;
2213 if (of_property_read_bool(this->dev
->of_node
,
2214 "fsl,no-blockmark-swap"))
2215 this->swap_block_mark
= false;
2217 dev_dbg(this->dev
, "Blockmark swapping %sabled\n",
2218 this->swap_block_mark
? "en" : "dis");
2220 ret
= gpmi_init_last(this);
2224 chip
->options
|= NAND_SKIP_BBTSCAN
;
2229 static struct gpmi_transfer
*get_next_transfer(struct gpmi_nand_data
*this)
2231 struct gpmi_transfer
*transfer
= &this->transfers
[this->ntransfers
];
2235 if (this->ntransfers
== GPMI_MAX_TRANSFERS
)
2241 static struct dma_async_tx_descriptor
*gpmi_chain_command(
2242 struct gpmi_nand_data
*this, u8 cmd
, const u8
*addr
, int naddr
)
2244 struct dma_chan
*channel
= get_dma_chan(this);
2245 struct dma_async_tx_descriptor
*desc
;
2246 struct gpmi_transfer
*transfer
;
2247 int chip
= this->nand
.cur_cs
;
2250 /* [1] send out the PIO words */
2251 pio
[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE
)
2252 | BM_GPMI_CTRL0_WORD_LENGTH
2253 | BF_GPMI_CTRL0_CS(chip
, this)
2254 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE
, this)
2255 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE
)
2256 | BM_GPMI_CTRL0_ADDRESS_INCREMENT
2257 | BF_GPMI_CTRL0_XFER_COUNT(naddr
+ 1);
2260 desc
= mxs_dmaengine_prep_pio(channel
, pio
, ARRAY_SIZE(pio
),
2265 transfer
= get_next_transfer(this);
2269 transfer
->cmdbuf
[0] = cmd
;
2271 memcpy(&transfer
->cmdbuf
[1], addr
, naddr
);
2273 sg_init_one(&transfer
->sgl
, transfer
->cmdbuf
, naddr
+ 1);
2274 dma_map_sg(this->dev
, &transfer
->sgl
, 1, DMA_TO_DEVICE
);
2276 transfer
->direction
= DMA_TO_DEVICE
;
2278 desc
= dmaengine_prep_slave_sg(channel
, &transfer
->sgl
, 1, DMA_MEM_TO_DEV
,
2279 MXS_DMA_CTRL_WAIT4END
);
2283 static struct dma_async_tx_descriptor
*gpmi_chain_wait_ready(
2284 struct gpmi_nand_data
*this)
2286 struct dma_chan
*channel
= get_dma_chan(this);
2289 pio
[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY
)
2290 | BM_GPMI_CTRL0_WORD_LENGTH
2291 | BF_GPMI_CTRL0_CS(this->nand
.cur_cs
, this)
2292 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE
, this)
2293 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA
)
2294 | BF_GPMI_CTRL0_XFER_COUNT(0);
2297 return mxs_dmaengine_prep_pio(channel
, pio
, 2, DMA_TRANS_NONE
,
2298 MXS_DMA_CTRL_WAIT4END
| MXS_DMA_CTRL_WAIT4RDY
);
2301 static struct dma_async_tx_descriptor
*gpmi_chain_data_read(
2302 struct gpmi_nand_data
*this, void *buf
, int raw_len
, bool *direct
)
2304 struct dma_async_tx_descriptor
*desc
;
2305 struct dma_chan
*channel
= get_dma_chan(this);
2306 struct gpmi_transfer
*transfer
;
2309 transfer
= get_next_transfer(this);
2313 transfer
->direction
= DMA_FROM_DEVICE
;
2315 *direct
= prepare_data_dma(this, buf
, raw_len
, &transfer
->sgl
,
2318 pio
[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ
)
2319 | BM_GPMI_CTRL0_WORD_LENGTH
2320 | BF_GPMI_CTRL0_CS(this->nand
.cur_cs
, this)
2321 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE
, this)
2322 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA
)
2323 | BF_GPMI_CTRL0_XFER_COUNT(raw_len
);
2326 pio
[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
2327 | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE
)
2328 | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
2329 | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY
);
2331 pio
[4] = transfer
->sgl
.dma_address
;
2332 pio
[5] = this->auxiliary_phys
;
2335 desc
= mxs_dmaengine_prep_pio(channel
, pio
, ARRAY_SIZE(pio
),
2341 desc
= dmaengine_prep_slave_sg(channel
, &transfer
->sgl
, 1,
2343 MXS_DMA_CTRL_WAIT4END
);
2348 static struct dma_async_tx_descriptor
*gpmi_chain_data_write(
2349 struct gpmi_nand_data
*this, const void *buf
, int raw_len
)
2351 struct dma_chan
*channel
= get_dma_chan(this);
2352 struct dma_async_tx_descriptor
*desc
;
2353 struct gpmi_transfer
*transfer
;
2356 transfer
= get_next_transfer(this);
2360 transfer
->direction
= DMA_TO_DEVICE
;
2362 prepare_data_dma(this, buf
, raw_len
, &transfer
->sgl
, DMA_TO_DEVICE
);
2364 pio
[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE
)
2365 | BM_GPMI_CTRL0_WORD_LENGTH
2366 | BF_GPMI_CTRL0_CS(this->nand
.cur_cs
, this)
2367 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE
, this)
2368 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA
)
2369 | BF_GPMI_CTRL0_XFER_COUNT(raw_len
);
2372 pio
[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
2373 | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE
)
2374 | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
|
2375 BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY
);
2377 pio
[4] = transfer
->sgl
.dma_address
;
2378 pio
[5] = this->auxiliary_phys
;
2381 desc
= mxs_dmaengine_prep_pio(channel
, pio
, ARRAY_SIZE(pio
),
2383 (this->bch
? MXS_DMA_CTRL_WAIT4END
: 0));
2388 desc
= dmaengine_prep_slave_sg(channel
, &transfer
->sgl
, 1,
2390 MXS_DMA_CTRL_WAIT4END
);
2395 static int gpmi_nfc_exec_op(struct nand_chip
*chip
,
2396 const struct nand_operation
*op
,
2399 const struct nand_op_instr
*instr
;
2400 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
2401 struct dma_async_tx_descriptor
*desc
= NULL
;
2402 int i
, ret
, buf_len
= 0, nbufs
= 0;
2404 void *buf_read
= NULL
;
2405 const void *buf_write
= NULL
;
2406 bool direct
= false;
2407 struct completion
*completion
;
2410 this->ntransfers
= 0;
2411 for (i
= 0; i
< GPMI_MAX_TRANSFERS
; i
++)
2412 this->transfers
[i
].direction
= DMA_NONE
;
2414 ret
= pm_runtime_get_sync(this->dev
);
2419 * This driver currently supports only one NAND chip. Plus, dies share
2420 * the same configuration. So once timings have been applied on the
2421 * controller side, they will not change anymore. When the time will
2422 * come, the check on must_apply_timings will have to be dropped.
2424 if (this->hw
.must_apply_timings
) {
2425 this->hw
.must_apply_timings
= false;
2426 gpmi_nfc_apply_timings(this);
2429 dev_dbg(this->dev
, "%s: %d instructions\n", __func__
, op
->ninstrs
);
2431 for (i
= 0; i
< op
->ninstrs
; i
++) {
2432 instr
= &op
->instrs
[i
];
2434 nand_op_trace(" ", instr
);
2436 switch (instr
->type
) {
2437 case NAND_OP_WAITRDY_INSTR
:
2438 desc
= gpmi_chain_wait_ready(this);
2440 case NAND_OP_CMD_INSTR
:
2441 cmd
= instr
->ctx
.cmd
.opcode
;
2444 * When this command has an address cycle chain it
2445 * together with the address cycle
2447 if (i
+ 1 != op
->ninstrs
&&
2448 op
->instrs
[i
+ 1].type
== NAND_OP_ADDR_INSTR
)
2451 desc
= gpmi_chain_command(this, cmd
, NULL
, 0);
2454 case NAND_OP_ADDR_INSTR
:
2455 desc
= gpmi_chain_command(this, cmd
, instr
->ctx
.addr
.addrs
,
2456 instr
->ctx
.addr
.naddrs
);
2458 case NAND_OP_DATA_OUT_INSTR
:
2459 buf_write
= instr
->ctx
.data
.buf
.out
;
2460 buf_len
= instr
->ctx
.data
.len
;
2463 desc
= gpmi_chain_data_write(this, buf_write
, buf_len
);
2466 case NAND_OP_DATA_IN_INSTR
:
2467 if (!instr
->ctx
.data
.len
)
2469 buf_read
= instr
->ctx
.data
.buf
.in
;
2470 buf_len
= instr
->ctx
.data
.len
;
2473 desc
= gpmi_chain_data_read(this, buf_read
, buf_len
,
2484 dev_dbg(this->dev
, "%s setup done\n", __func__
);
2487 dev_err(this->dev
, "Multiple data instructions not supported\n");
2493 writel(this->bch_flashlayout0
,
2494 this->resources
.bch_regs
+ HW_BCH_FLASH0LAYOUT0
);
2495 writel(this->bch_flashlayout1
,
2496 this->resources
.bch_regs
+ HW_BCH_FLASH0LAYOUT1
);
2499 if (this->bch
&& buf_read
) {
2500 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN
,
2501 this->resources
.bch_regs
+ HW_BCH_CTRL_SET
);
2502 completion
= &this->bch_done
;
2504 desc
->callback
= dma_irq_callback
;
2505 desc
->callback_param
= this;
2506 completion
= &this->dma_done
;
2509 init_completion(completion
);
2511 dmaengine_submit(desc
);
2512 dma_async_issue_pending(get_dma_chan(this));
2514 to
= wait_for_completion_timeout(completion
, msecs_to_jiffies(1000));
2516 dev_err(this->dev
, "DMA timeout, last DMA\n");
2517 gpmi_dump_info(this);
2522 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN
,
2523 this->resources
.bch_regs
+ HW_BCH_CTRL_CLR
);
2524 gpmi_clear_bch(this);
2529 for (i
= 0; i
< this->ntransfers
; i
++) {
2530 struct gpmi_transfer
*transfer
= &this->transfers
[i
];
2532 if (transfer
->direction
!= DMA_NONE
)
2533 dma_unmap_sg(this->dev
, &transfer
->sgl
, 1,
2534 transfer
->direction
);
2537 if (!ret
&& buf_read
&& !direct
)
2538 memcpy(buf_read
, this->data_buffer_dma
,
2539 gpmi_raw_len_to_len(this, buf_len
));
2543 pm_runtime_mark_last_busy(this->dev
);
2544 pm_runtime_put_autosuspend(this->dev
);
2549 static const struct nand_controller_ops gpmi_nand_controller_ops
= {
2550 .attach_chip
= gpmi_nand_attach_chip
,
2551 .setup_data_interface
= gpmi_setup_data_interface
,
2552 .exec_op
= gpmi_nfc_exec_op
,
2555 static int gpmi_nand_init(struct gpmi_nand_data
*this)
2557 struct nand_chip
*chip
= &this->nand
;
2558 struct mtd_info
*mtd
= nand_to_mtd(chip
);
2561 /* init the MTD data structures */
2562 mtd
->name
= "gpmi-nand";
2563 mtd
->dev
.parent
= this->dev
;
2565 /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
2566 nand_set_controller_data(chip
, this);
2567 nand_set_flash_node(chip
, this->pdev
->dev
.of_node
);
2568 chip
->legacy
.block_markbad
= gpmi_block_markbad
;
2569 chip
->badblock_pattern
= &gpmi_bbt_descr
;
2570 chip
->options
|= NAND_NO_SUBPAGE_WRITE
;
2572 /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
2573 this->swap_block_mark
= !GPMI_IS_MX23(this);
2576 * Allocate a temporary DMA buffer for reading ID in the
2577 * nand_scan_ident().
2579 this->bch_geometry
.payload_size
= 1024;
2580 this->bch_geometry
.auxiliary_size
= 128;
2581 ret
= gpmi_alloc_dma_buffer(this);
2585 nand_controller_init(&this->base
);
2586 this->base
.ops
= &gpmi_nand_controller_ops
;
2587 chip
->controller
= &this->base
;
2589 ret
= nand_scan(chip
, GPMI_IS_MX6(this) ? 2 : 1);
2593 ret
= nand_boot_init(this);
2595 goto err_nand_cleanup
;
2596 ret
= nand_create_bbt(chip
);
2598 goto err_nand_cleanup
;
2600 ret
= mtd_device_register(mtd
, NULL
, 0);
2602 goto err_nand_cleanup
;
2608 gpmi_free_dma_buffer(this);
2612 static const struct of_device_id gpmi_nand_id_table
[] = {
2614 .compatible
= "fsl,imx23-gpmi-nand",
2615 .data
= &gpmi_devdata_imx23
,
2617 .compatible
= "fsl,imx28-gpmi-nand",
2618 .data
= &gpmi_devdata_imx28
,
2620 .compatible
= "fsl,imx6q-gpmi-nand",
2621 .data
= &gpmi_devdata_imx6q
,
2623 .compatible
= "fsl,imx6sx-gpmi-nand",
2624 .data
= &gpmi_devdata_imx6sx
,
2626 .compatible
= "fsl,imx7d-gpmi-nand",
2627 .data
= &gpmi_devdata_imx7d
,
2630 MODULE_DEVICE_TABLE(of
, gpmi_nand_id_table
);
2632 static int gpmi_nand_probe(struct platform_device
*pdev
)
2634 struct gpmi_nand_data
*this;
2635 const struct of_device_id
*of_id
;
2638 this = devm_kzalloc(&pdev
->dev
, sizeof(*this), GFP_KERNEL
);
2642 of_id
= of_match_device(gpmi_nand_id_table
, &pdev
->dev
);
2644 this->devdata
= of_id
->data
;
2646 dev_err(&pdev
->dev
, "Failed to find the right device id.\n");
2650 platform_set_drvdata(pdev
, this);
2652 this->dev
= &pdev
->dev
;
2654 ret
= acquire_resources(this);
2656 goto exit_acquire_resources
;
2658 ret
= __gpmi_enable_clk(this, true);
2662 pm_runtime_set_autosuspend_delay(&pdev
->dev
, 500);
2663 pm_runtime_use_autosuspend(&pdev
->dev
);
2664 pm_runtime_set_active(&pdev
->dev
);
2665 pm_runtime_enable(&pdev
->dev
);
2666 pm_runtime_get_sync(&pdev
->dev
);
2668 ret
= gpmi_init(this);
2672 ret
= gpmi_nand_init(this);
2676 pm_runtime_mark_last_busy(&pdev
->dev
);
2677 pm_runtime_put_autosuspend(&pdev
->dev
);
2679 dev_info(this->dev
, "driver registered.\n");
2684 pm_runtime_put(&pdev
->dev
);
2685 pm_runtime_disable(&pdev
->dev
);
2686 release_resources(this);
2687 exit_acquire_resources
:
2692 static int gpmi_nand_remove(struct platform_device
*pdev
)
2694 struct gpmi_nand_data
*this = platform_get_drvdata(pdev
);
2696 pm_runtime_put_sync(&pdev
->dev
);
2697 pm_runtime_disable(&pdev
->dev
);
2699 nand_release(&this->nand
);
2700 gpmi_free_dma_buffer(this);
2701 release_resources(this);
2705 #ifdef CONFIG_PM_SLEEP
/* System suspend: drop the DMA channels; gpmi_pm_resume() reacquires them. */
static int gpmi_pm_suspend(struct device *dev)
{
	struct gpmi_nand_data *this = dev_get_drvdata(dev);

	release_dma_channels(this);
	return 0;
}
2714 static int gpmi_pm_resume(struct device
*dev
)
2716 struct gpmi_nand_data
*this = dev_get_drvdata(dev
);
2719 ret
= acquire_dma_channels(this);
2723 /* re-init the GPMI registers */
2724 ret
= gpmi_init(this);
2726 dev_err(this->dev
, "Error setting GPMI : %d\n", ret
);
2730 /* Set flag to get timing setup restored for next exec_op */
2731 if (this->hw
.clk_rate
)
2732 this->hw
.must_apply_timings
= true;
2734 /* re-init the BCH registers */
2735 ret
= bch_set_geometry(this);
2737 dev_err(this->dev
, "Error setting BCH : %d\n", ret
);
2743 #endif /* CONFIG_PM_SLEEP */
2745 static int __maybe_unused
gpmi_runtime_suspend(struct device
*dev
)
2747 struct gpmi_nand_data
*this = dev_get_drvdata(dev
);
2749 return __gpmi_enable_clk(this, false);
2752 static int __maybe_unused
gpmi_runtime_resume(struct device
*dev
)
2754 struct gpmi_nand_data
*this = dev_get_drvdata(dev
);
2756 return __gpmi_enable_clk(this, true);
2759 static const struct dev_pm_ops gpmi_pm_ops
= {
2760 SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend
, gpmi_pm_resume
)
2761 SET_RUNTIME_PM_OPS(gpmi_runtime_suspend
, gpmi_runtime_resume
, NULL
)
2764 static struct platform_driver gpmi_nand_driver
= {
2766 .name
= "gpmi-nand",
2768 .of_match_table
= gpmi_nand_id_table
,
2770 .probe
= gpmi_nand_probe
,
2771 .remove
= gpmi_nand_remove
,
2773 module_platform_driver(gpmi_nand_driver
);
2775 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
2776 MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
2777 MODULE_LICENSE("GPL");