/*
 * Freescale GPMI NAND Flash Driver
 *
 * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/of_device.h>
#include <linux/of_mtd.h>
#include "gpmi-nand.h"
/* Resource names for the GPMI NAND driver. */
#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME	"gpmi-nand"
#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME	"bch"
#define GPMI_NAND_BCH_INTERRUPT_RES_NAME	"bch"
/* add our own bbt descriptor */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
	.pattern	= scan_ff_pattern
};
/*
 * We may change the layout if we can get the ECC info from the datasheet,
 * else we will use all the (page + OOB).
 */
static struct nand_ecclayout gpmi_hw_ecclayout = {
	.oobfree = { {.offset = 0, .length = 0} }
};
static const struct gpmi_devdata gpmi_devdata_imx23 = {
	.bch_max_ecc_strength = 20,
	.max_chain_delay = 16,
};

static const struct gpmi_devdata gpmi_devdata_imx28 = {
	.bch_max_ecc_strength = 20,
	.max_chain_delay = 16,
};

static const struct gpmi_devdata gpmi_devdata_imx6q = {
	.bch_max_ecc_strength = 40,
	.max_chain_delay = 12,
};

static const struct gpmi_devdata gpmi_devdata_imx6sx = {
	.bch_max_ecc_strength = 62,
	.max_chain_delay = 12,
};
static irqreturn_t bch_irq(int irq, void *cookie)
{
	struct gpmi_nand_data *this = cookie;

	complete(&this->bch_done);

	return IRQ_HANDLED;
}
/*
 * Calculate the ECC strength by hand:
 *	E : The ECC strength.
 *	G : the length of Galois Field.
 *	N : The chunk count per page.
 *	O : the oobsize of the NAND chip.
 *	M : the metasize per page.
 *
 *	The formula is:
 *
 *	     E * G * N
 *	    ------------ <= (O - M)
 *	         8
 */
static inline int get_ecc_strength(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct mtd_info *mtd = nand_to_mtd(&this->nand);
	int ecc_strength;

	ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
			/ (geo->gf_len * geo->ecc_chunk_count);

	/* Round down to the nearest even number. */
	return round_down(ecc_strength, 2);
}
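/*
 * A quick worked example of the formula above (hypothetical numbers, not
 * taken from any particular chip): with a 2K page, 64 bytes of OOB,
 * 4 chunks of 512 bytes, GF13 and 10 bytes of metadata,
 *
 *	E <= ((64 - 10) * 8) / (13 * 4) = 8.3,
 *
 * which round_down(..., 2) turns into an ECC strength of 8.
 */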
static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;

	/* Do the sanity check. */
	if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
		/* The mx23/mx28 only support GF13. */
		if (geo->gf_len == 14)
			return false;
	}
	return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
}
/*
 * If we can get the ECC information from the NAND chip, we do not
 * need to calculate it ourselves.
 *
 * We may have available oob space in this case.
 */
static int set_geometry_by_ecc_info(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_oobfree *of = gpmi_hw_ecclayout.oobfree;
	unsigned int block_mark_bit_offset;

	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
		return -EINVAL;

	switch (chip->ecc_step_ds) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		dev_err(this->dev,
			"unsupported nand chip. ecc bits : %d, ecc size : %d\n",
			chip->ecc_strength_ds, chip->ecc_step_ds);
		return -EINVAL;
	}
	geo->ecc_chunk_size = chip->ecc_step_ds;
	geo->ecc_strength = round_up(chip->ecc_strength_ds, 2);
	if (!gpmi_check_ecc(this))
		return -EINVAL;

	/* Keep the C >= O */
	if (geo->ecc_chunk_size < mtd->oobsize) {
		dev_err(this->dev,
			"unsupported nand chip. ecc size: %d, oob size : %d\n",
			chip->ecc_step_ds, mtd->oobsize);
		return -EINVAL;
	}

	/* The default value, see comment in the legacy_set_geometry(). */
	geo->metadata_size = 10;

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	/*
	 * Now, a NAND chip with a 2K page (512-byte data chunks) looks like this:
	 *
	 *	|                           P                            |
	 *	|<----------------------------------------------------->|
	 *	|                     P'                        |   D  | |  O' |
	 *	|<-------------------------------------------->|<---->| |<--->|
	 *	+---+----------+-+----------+-+----------+-+----------+-+-----+
	 *	| M |   data   |E|   data   |E|   data   |E|   data   |E|     |
	 *	+---+----------+-+----------+-+----------+-+----------+-+-----+
	 *
	 *	P : the page size for the BCH module.
	 *	E : The ECC strength.
	 *	G : the length of Galois Field.
	 *	N : The chunk count per page.
	 *	M : the metasize per page.
	 *	C : the ecc chunk size, aka the "data" above.
	 *	P': the nand chip's page size.
	 *	O : the nand chip's oob size.
	 *	O': the free oob.
	 *
	 *	The formula for P is:
	 *
	 *	         E * G * N
	 *	    P = ----------- + P' + M
	 *	             8
	 *
	 *	The position of the block mark moves forward in the ECC-based
	 *	view of the page, and the delta is:
	 *
	 *	         E * G * (N - 1)
	 *	    D = (--------------- + M)
	 *	               8
	 *
	 *	Please see the comment in legacy_set_geometry().
	 *	With the condition C >= O, we still get the same result.
	 *	So the bit position of the physical block mark within the
	 *	ECC-based view of the page is:
	 *
	 *	    (P' - D) * 8
	 */
	geo->page_size = mtd->writesize + geo->metadata_size +
		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;

	/* The available oob size we have. */
	if (geo->page_size < mtd->writesize + mtd->oobsize) {
		of->offset = geo->page_size - mtd->writesize;
		of->length = mtd->oobsize - of->offset;
	}

	geo->payload_size = mtd->writesize;

	geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
	geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
				+ ALIGN(geo->ecc_chunk_count, 4);

	if (!this->swap_block_mark)
		return 0;

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ geo->metadata_size * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;

	return 0;
}
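/*
 * A worked example of the block mark offset math above (hypothetical
 * geometry, for illustration only): with a 2K page, 4 chunks, GF13,
 * ECC strength 8 and 10 bytes of metadata,
 *
 *	block_mark_bit_offset = 2048 * 8 - (8 * 13 * 3 + 10 * 8)
 *	                      = 16384 - 392 = 15992,
 *
 * i.e. byte offset 1999 and bit offset 0 within the ECC-based view.
 */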
static int legacy_set_geometry(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct mtd_info *mtd = nand_to_mtd(&this->nand);
	unsigned int metadata_size;
	unsigned int status_size;
	unsigned int block_mark_bit_offset;

	/*
	 * The size of the metadata can be changed, though we set it to 10
	 * bytes now. But it can't be too large, because we have to save
	 * enough space for BCH.
	 */
	geo->metadata_size = 10;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default for chunk size. */
	geo->ecc_chunk_size = 512;
	while (geo->ecc_chunk_size < mtd->oobsize) {
		geo->ecc_chunk_size *= 2; /* keep C >= O */
		geo->gf_len = 14;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	/* We use the same ECC strength for all chunks. */
	geo->ecc_strength = get_ecc_strength(this);
	if (!gpmi_check_ecc(this)) {
		dev_err(this->dev,
			"ecc strength: %d cannot be supported by the controller (%d)\n"
			"try to use minimum ecc strength that NAND chip required\n",
			geo->ecc_strength,
			this->devdata->bch_max_ecc_strength);
		return -EINVAL;
	}

	geo->page_size = mtd->writesize + mtd->oobsize;
	geo->payload_size = mtd->writesize;

	/*
	 * The auxiliary buffer contains the metadata and the ECC status. The
	 * metadata is padded to the nearest 32-bit boundary. The ECC status
	 * contains one byte for every ECC chunk, and is also padded to the
	 * nearest 32-bit boundary.
	 */
	metadata_size = ALIGN(geo->metadata_size, 4);
	status_size   = ALIGN(geo->ecc_chunk_count, 4);

	geo->auxiliary_size = metadata_size + status_size;
	geo->auxiliary_status_offset = metadata_size;

	if (!this->swap_block_mark)
		return 0;

	/*
	 * We need to compute the byte and bit offsets of
	 * the physical block mark within the ECC-based view of the page.
	 *
	 * A NAND chip with a 2K page looks like this:
	 *
	 *	+---+----------+-+----------+-+----------+-+----------+-+
	 *	| M |   data   |E|   data   |E|   data   |E|   data   |E|
	 *	+---+----------+-+----------+-+----------+-+----------+-+
	 *
	 * The position of the block mark moves forward in the ECC-based view
	 * of the page, and the delta is:
	 *
	 *	     E * G * (N - 1)
	 *	D = (---------------- + M)
	 *	            8
	 *
	 * With the formula to compute the ECC strength, and the condition
	 * C >= O (C is the ecc chunk size),
	 * it is easy to deduce the following result:
	 *
	 *	 E * G      (O - M)     C - M       C - M
	 *	------- <= --------- <= ------- < ---------
	 *	   8           N           N       (N - 1)
	 *
	 * So:
	 *
	 *	     E * G * (N - 1)
	 *	D = (---------------- + M) < C
	 *	            8
	 *
	 * The above inequality means the position of the block mark
	 * within the ECC-based view of the page is still in the data chunk,
	 * and it's NOT in the ECC bits of the chunk.
	 *
	 * Use the following to compute the bit position of the
	 * physical block mark within the ECC-based view of the page:
	 *	(page_size - D) * 8
	 */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ geo->metadata_size * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;

	return 0;
}
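/*
 * Worked example for the auxiliary buffer layout above (hypothetical
 * 2K + 64 NAND with 4 chunks of 512 bytes): metadata_size = 10 is padded
 * to 12, the 4 status bytes are padded to 4, so auxiliary_size = 16 and
 * auxiliary_status_offset = 12.
 */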
int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
	if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
				|| legacy_set_geometry(this))
		return set_geometry_by_ecc_info(this);

	return 0;
}
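/*
 * A hypothetical device-tree fragment (node label is illustrative only)
 * that forces the "minimum ECC from the chip datasheet" path above:
 *
 *	&gpmi {
 *		fsl,use-minimum-ecc;
 *	};
 */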
struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
{
	/* We use DMA channel 0 to access all the nand chips. */
	return this->dma_chans[0];
}
/* Can we use the upper's buffer directly for DMA? */
void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
{
	struct scatterlist *sgl = &this->data_sgl;
	int ret;

	/* first try to map the upper buffer directly */
	if (virt_addr_valid(this->upper_buf) &&
		!object_is_on_stack(this->upper_buf)) {
		sg_init_one(sgl, this->upper_buf, this->upper_len);
		ret = dma_map_sg(this->dev, sgl, 1, dr);
		if (ret == 0)
			goto map_fail;

		this->direct_dma_map_ok = true;
		return;
	}

map_fail:
	/* We have to use our own DMA buffer. */
	sg_init_one(sgl, this->data_buffer_dma, this->upper_len);

	if (dr == DMA_TO_DEVICE)
		memcpy(this->data_buffer_dma, this->upper_buf, this->upper_len);

	dma_map_sg(this->dev, sgl, 1, dr);

	this->direct_dma_map_ok = false;
}
/* This will be called after the DMA operation is finished. */
static void dma_irq_callback(void *param)
{
	struct gpmi_nand_data *this = param;
	struct completion *dma_c = &this->dma_done;

	switch (this->dma_type) {
	case DMA_FOR_COMMAND:
		dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
		break;

	case DMA_FOR_READ_DATA:
		dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
		if (this->direct_dma_map_ok == false)
			memcpy(this->upper_buf, this->data_buffer_dma,
				this->upper_len);
		break;

	case DMA_FOR_WRITE_DATA:
		dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
		break;

	case DMA_FOR_READ_ECC_PAGE:
	case DMA_FOR_WRITE_ECC_PAGE:
		/* We have to wait for the BCH interrupt to finish. */
		break;

	default:
		dev_err(this->dev, "in wrong DMA operation.\n");
	}

	complete(dma_c);
}
int start_dma_without_bch_irq(struct gpmi_nand_data *this,
				struct dma_async_tx_descriptor *desc)
{
	struct completion *dma_c = &this->dma_done;
	unsigned long timeout;

	init_completion(dma_c);

	desc->callback		= dma_irq_callback;
	desc->callback_param	= this;
	dmaengine_submit(desc);
	dma_async_issue_pending(get_dma_chan(this));

	/* Wait for the interrupt from the DMA block. */
	timeout = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
	if (!timeout) {
		dev_err(this->dev, "DMA timeout, last DMA :%d\n",
			this->last_dma_type);
		gpmi_dump_info(this);
		return -ETIMEDOUT;
	}
	return 0;
}
/*
 * This function is used in BCH reading or BCH writing pages.
 * It will wait for the BCH interrupt as long as ONE second.
 * Actually, we must wait for two interrupts:
 *	[1] firstly the DMA interrupt and
 *	[2] secondly the BCH interrupt.
 */
int start_dma_with_bch_irq(struct gpmi_nand_data *this,
			struct dma_async_tx_descriptor *desc)
{
	struct completion *bch_c = &this->bch_done;
	unsigned long timeout;

	/* Prepare to receive an interrupt from the BCH block. */
	init_completion(bch_c);

	/* start the DMA */
	start_dma_without_bch_irq(this, desc);

	/* Wait for the interrupt from the BCH block. */
	timeout = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
	if (!timeout) {
		dev_err(this->dev, "BCH timeout, last DMA :%d\n",
			this->last_dma_type);
		gpmi_dump_info(this);
		return -ETIMEDOUT;
	}
	return 0;
}
static int acquire_register_block(struct gpmi_nand_data *this,
				  const char *res_name)
{
	struct platform_device *pdev = this->pdev;
	struct resources *res = &this->resources;
	struct resource *r;
	void __iomem *p;

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
	p = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(p))
		return PTR_ERR(p);

	if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
		res->gpmi_regs = p;
	else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
		res->bch_regs = p;
	else
		dev_err(this->dev, "unknown resource name : %s\n", res_name);

	return 0;
}
static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
{
	struct platform_device *pdev = this->pdev;
	const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
	struct resource *r;
	int err;

	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
	if (!r) {
		dev_err(this->dev, "Can't get resource for %s\n", res_name);
		return -ENODEV;
	}

	err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
	if (err)
		dev_err(this->dev, "error requesting BCH IRQ\n");

	return err;
}
static void release_dma_channels(struct gpmi_nand_data *this)
{
	unsigned int i;

	for (i = 0; i < DMA_CHANS; i++)
		if (this->dma_chans[i]) {
			dma_release_channel(this->dma_chans[i]);
			this->dma_chans[i] = NULL;
		}
}
static int acquire_dma_channels(struct gpmi_nand_data *this)
{
	struct platform_device *pdev = this->pdev;
	struct dma_chan *dma_chan;

	/* request dma channel */
	dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
	if (!dma_chan) {
		dev_err(this->dev, "Failed to request DMA channel.\n");
		goto acquire_err;
	}

	this->dma_chans[0] = dma_chan;
	return 0;

acquire_err:
	release_dma_channels(this);
	return -EINVAL;
}
static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {
	"gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
};
static int gpmi_get_clks(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	char **extra_clks = NULL;
	struct clk *clk;
	int err, i;

	/* The main clock is stored in the first. */
	r->clock[0] = devm_clk_get(this->dev, "gpmi_io");
	if (IS_ERR(r->clock[0])) {
		err = PTR_ERR(r->clock[0]);
		goto err_clock;
	}

	/* Get extra clocks */
	if (GPMI_IS_MX6(this))
		extra_clks = extra_clks_for_mx6q;
	if (!extra_clks)
		return 0;

	for (i = 1; i < GPMI_CLK_MAX; i++) {
		if (extra_clks[i - 1] == NULL)
			break;

		clk = devm_clk_get(this->dev, extra_clks[i - 1]);
		if (IS_ERR(clk)) {
			err = PTR_ERR(clk);
			goto err_clock;
		}

		r->clock[i] = clk;
	}

	if (GPMI_IS_MX6(this))
		/*
		 * Set the default value for the gpmi clock.
		 *
		 * If you want to use the ONFI nand which is in the
		 * Synchronous Mode, you should change the clock as you need.
		 */
		clk_set_rate(r->clock[0], 22000000);

	return 0;

err_clock:
	dev_dbg(this->dev, "failed in finding the clocks.\n");
	return err;
}
static int acquire_resources(struct gpmi_nand_data *this)
{
	int ret;

	ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
	if (ret)
		goto exit_regs;

	ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
	if (ret)
		goto exit_regs;

	ret = acquire_bch_irq(this, bch_irq);
	if (ret)
		goto exit_regs;

	ret = acquire_dma_channels(this);
	if (ret)
		goto exit_regs;

	ret = gpmi_get_clks(this);
	if (ret)
		goto exit_clock;
	return 0;

exit_clock:
	release_dma_channels(this);
exit_regs:
	return ret;
}
static void release_resources(struct gpmi_nand_data *this)
{
	release_dma_channels(this);
}
static int init_hardware(struct gpmi_nand_data *this)
{
	int ret;

	/*
	 * This structure contains the "safe" GPMI timing that should succeed
	 * with any NAND Flash device
	 * (although, with less-than-optimal performance).
	 */
	struct nand_timing safe_timing = {
		.data_setup_in_ns        = 80,
		.data_hold_in_ns         = 60,
		.address_setup_in_ns     = 25,
		.gpmi_sample_delay_in_ns =  6,
	};

	/* Initialize the hardware. */
	ret = gpmi_init(this);
	if (ret)
		return ret;

	this->timing = safe_timing;

	return 0;
}
static int read_page_prepare(struct gpmi_nand_data *this,
			void *destination, unsigned length,
			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
			void **use_virt, dma_addr_t *use_phys)
{
	struct device *dev = this->dev;

	if (virt_addr_valid(destination)) {
		dma_addr_t dest_phys;

		dest_phys = dma_map_single(dev, destination,
						length, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dest_phys)) {
			if (alt_size < length) {
				dev_err(dev, "Alternate buffer is too small\n");
				return -ENOMEM;
			}
			goto map_failed;
		}
		*use_virt = destination;
		*use_phys = dest_phys;
		this->direct_dma_map_ok = true;

		return 0;
	}

map_failed:
	*use_virt = alt_virt;
	*use_phys = alt_phys;
	this->direct_dma_map_ok = false;

	return 0;
}
static inline void read_page_end(struct gpmi_nand_data *this,
			void *destination, unsigned length,
			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
			void *used_virt, dma_addr_t used_phys)
{
	if (this->direct_dma_map_ok)
		dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE);
}
static inline void read_page_swap_end(struct gpmi_nand_data *this,
			void *destination, unsigned length,
			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
			void *used_virt, dma_addr_t used_phys)
{
	if (!this->direct_dma_map_ok)
		memcpy(destination, alt_virt, length);
}
static int send_page_prepare(struct gpmi_nand_data *this,
			const void *source, unsigned length,
			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
			const void **use_virt, dma_addr_t *use_phys)
{
	struct device *dev = this->dev;

	if (virt_addr_valid(source)) {
		dma_addr_t source_phys;

		source_phys = dma_map_single(dev, (void *)source, length,
						DMA_TO_DEVICE);
		if (dma_mapping_error(dev, source_phys)) {
			if (alt_size < length) {
				dev_err(dev, "Alternate buffer is too small\n");
				return -ENOMEM;
			}
			goto map_failed;
		}
		*use_virt = source;
		*use_phys = source_phys;

		return 0;
	}

map_failed:
	/*
	 * Copy the content of the source buffer into the alternate
	 * buffer and set up the return values accordingly.
	 */
	memcpy(alt_virt, source, length);

	*use_virt = alt_virt;
	*use_phys = alt_phys;

	return 0;
}
static void send_page_end(struct gpmi_nand_data *this,
			const void *source, unsigned length,
			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
			const void *used_virt, dma_addr_t used_phys)
{
	struct device *dev = this->dev;

	if (used_virt == source)
		dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE);
}
static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;

	if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt))
		dma_free_coherent(dev, this->page_buffer_size,
					this->page_buffer_virt,
					this->page_buffer_phys);
	kfree(this->cmd_buffer);
	kfree(this->data_buffer_dma);
	kfree(this->raw_buffer);

	this->cmd_buffer	= NULL;
	this->data_buffer_dma	= NULL;
	this->page_buffer_virt	= NULL;
	this->page_buffer_size	= 0;
}
/* Allocate the DMA buffers */
static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct device *dev = this->dev;
	struct mtd_info *mtd = nand_to_mtd(&this->nand);

	/* [1] Allocate a command buffer. PAGE_SIZE is enough. */
	this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
	if (this->cmd_buffer == NULL)
		goto error_alloc;

	/*
	 * [2] Allocate a read/write data buffer.
	 *     The gpmi_alloc_dma_buffer can be called twice.
	 *     We allocate a PAGE_SIZE length buffer if gpmi_alloc_dma_buffer
	 *     is called before the nand_scan_ident; and we allocate a buffer
	 *     of the real NAND page size when the gpmi_alloc_dma_buffer is
	 *     called after the nand_scan_ident.
	 */
	this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
					GFP_DMA | GFP_KERNEL);
	if (this->data_buffer_dma == NULL)
		goto error_alloc;

	/*
	 * [3] Allocate the page buffer.
	 *
	 * Both the payload buffer and the auxiliary buffer must appear on
	 * 32-bit boundaries. We presume the size of the payload buffer is a
	 * power of two and is much larger than four, which guarantees the
	 * auxiliary buffer will appear on a 32-bit boundary.
	 */
	this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
	this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
					&this->page_buffer_phys, GFP_DMA);
	if (!this->page_buffer_virt)
		goto error_alloc;

	this->raw_buffer = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!this->raw_buffer)
		goto error_alloc;

	/* Slice up the page buffer. */
	this->payload_virt = this->page_buffer_virt;
	this->payload_phys = this->page_buffer_phys;
	this->auxiliary_virt = this->payload_virt + geo->payload_size;
	this->auxiliary_phys = this->payload_phys + geo->payload_size;

	return 0;

error_alloc:
	gpmi_free_dma_buffer(this);
	return -ENOMEM;
}
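/*
 * Sketch of the resulting page buffer layout (hypothetical 2K geometry:
 * payload_size = 2048, auxiliary_size = 16):
 *
 *	page_buffer_virt -> [ payload: 2048 bytes ][ auxiliary: 16 bytes ]
 *	                     ^payload_virt          ^auxiliary_virt
 *
 * Since payload_size is a power of two, auxiliary_virt/phys stay 32-bit
 * aligned, as the comment in [3] above requires.
 */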
static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	int ret;

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will deassert
	 * both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes. NAND_CMD_NONE means the END of the queue.
	 */
	if ((ctrl & (NAND_ALE | NAND_CLE))) {
		if (data != NAND_CMD_NONE)
			this->cmd_buffer[this->command_length++] = data;
		return;
	}

	if (!this->command_length)
		return;

	ret = gpmi_send_command(this);
	if (ret)
		dev_err(this->dev, "Chip: %u, Error %d\n",
			this->current_chip, ret);

	this->command_length = 0;
}
static int gpmi_dev_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);

	return gpmi_is_ready(this, this->current_chip);
}
static void gpmi_select_chip(struct mtd_info *mtd, int chipnr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);

	if ((this->current_chip < 0) && (chipnr >= 0))
		gpmi_begin(this);
	else if ((this->current_chip >= 0) && (chipnr < 0))
		gpmi_end(this);

	this->current_chip = chipnr;
}
static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);

	dev_dbg(this->dev, "len is %d\n", len);
	this->upper_buf	= buf;
	this->upper_len	= len;

	gpmi_read_data(this);
}
static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);

	dev_dbg(this->dev, "len is %d\n", len);
	this->upper_buf	= (uint8_t *)buf;
	this->upper_len	= len;

	gpmi_send_data(this);
}
static uint8_t gpmi_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	uint8_t *buf = this->data_buffer_dma;

	gpmi_read_buf(mtd, buf, 1);
	return buf[0];
}
/*
 * Handles block mark swapping.
 * It can be called when swapping the block mark, or swapping it back,
 * because the operations are the same.
 */
static void block_mark_swapping(struct gpmi_nand_data *this,
				void *payload, void *auxiliary)
{
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	unsigned char *p;
	unsigned char *a;
	unsigned int  bit;
	unsigned char mask;
	unsigned char from_data;
	unsigned char from_oob;

	if (!this->swap_block_mark)
		return;

	/*
	 * If control arrives here, we're swapping. Make some convenience
	 * variables.
	 */
	bit = nfc_geo->block_mark_bit_offset;
	p   = payload + nfc_geo->block_mark_byte_offset;
	a   = auxiliary;

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
	from_data = (p[0] >> bit) | (p[1] << (8 - bit));

	/* Get the byte from the OOB. */
	from_oob = a[0];

	/* Swap them. */
	a[0] = from_data;

	mask = (0x1 << bit) - 1;
	p[0] = (p[0] & mask) | (from_oob << bit);

	mask = ~0 << bit;
	p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
}
static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	void          *payload_virt;
	dma_addr_t    payload_phys;
	void          *auxiliary_virt;
	dma_addr_t    auxiliary_phys;
	unsigned int  i;
	unsigned char *status;
	unsigned int  max_bitflips = 0;
	int           ret;

	dev_dbg(this->dev, "page number is : %d\n", page);
	ret = read_page_prepare(this, buf, nfc_geo->payload_size,
					this->payload_virt, this->payload_phys,
					nfc_geo->payload_size,
					&payload_virt, &payload_phys);
	if (ret) {
		dev_err(this->dev, "Inadequate DMA buffer\n");
		return -ENOMEM;
	}
	auxiliary_virt = this->auxiliary_virt;
	auxiliary_phys = this->auxiliary_phys;

	/* go! */
	ret = gpmi_read_page(this, payload_phys, auxiliary_phys);
	read_page_end(this, buf, nfc_geo->payload_size,
			this->payload_virt, this->payload_phys,
			nfc_geo->payload_size,
			payload_virt, payload_phys);
	if (ret) {
		dev_err(this->dev, "Error in ECC-based read: %d\n", ret);
		return ret;
	}

	/* handle the block mark swapping */
	block_mark_swapping(this, payload_virt, auxiliary_virt);

	/* Loop over status bytes, accumulating ECC status. */
	status = auxiliary_virt + nfc_geo->auxiliary_status_offset;

	for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
		if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
			continue;

		if (*status == STATUS_UNCORRECTABLE) {
			mtd->ecc_stats.failed++;
			continue;
		}
		mtd->ecc_stats.corrected += *status;
		max_bitflips = max_t(unsigned int, max_bitflips, *status);
	}

	if (oob_required) {
		/*
		 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
		 * for details about our policy for delivering the OOB.
		 *
		 * We fill the caller's buffer with set bits, and then copy the
		 * block mark to the caller's buffer. Note that, if block mark
		 * swapping was necessary, it has already been done, so we can
		 * rely on the first byte of the auxiliary buffer to contain
		 * the block mark.
		 */
		memset(chip->oob_poi, ~0, mtd->oobsize);
		chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
	}

	read_page_swap_end(this, buf, nfc_geo->payload_size,
			this->payload_virt, this->payload_phys,
			nfc_geo->payload_size,
			payload_virt, payload_phys);

	return max_bitflips;
}
/* Fake a virtual small page for the subpage read */
static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
			uint32_t offs, uint32_t len, uint8_t *buf, int page)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	void __iomem *bch_regs = this->resources.bch_regs;
	struct bch_geometry old_geo = this->bch_geometry;
	struct bch_geometry *geo = &this->bch_geometry;
	int size = chip->ecc.size; /* ECC chunk size */
	int meta, n, page_size;
	u32 r1_old, r2_old, r1_new, r2_new;
	unsigned int max_bitflips;
	int first, last, marker_pos;
	int ecc_parity_size;
	int col = 0;
	int old_swap_block_mark = this->swap_block_mark;

	/* The size of ECC parity */
	ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;

	/* Align it with the chunk size */
	first = offs / size;
	last  = (offs + len - 1) / size;

	if (this->swap_block_mark) {
		/*
		 * Find the chunk which contains the Block Marker.
		 * If this chunk is in the range of [first, last],
		 * we have to read out the whole page.
		 * Why? Because we had swapped the data at the position of the
		 * Block Marker into the metadata which is bound to chunk 0.
		 */
		marker_pos = geo->block_mark_byte_offset / size;
		if (last >= marker_pos && first <= marker_pos) {
			dev_dbg(this->dev,
				"page:%d, first:%d, last:%d, marker at:%d\n",
				page, first, last, marker_pos);
			return gpmi_ecc_read_page(mtd, chip, buf, 0, page);
		}
	}

	meta = geo->metadata_size;
	if (first) {
		col = meta + (size + ecc_parity_size) * first;
		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, col, -1);

		meta = 0;
		buf = buf + first * size;
	}

	/* Save the old environment */
	r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0);
	r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1);

	/* change the BCH registers and bch_geometry{} */
	n = last - first + 1;
	page_size = meta + (size + ecc_parity_size) * n;

	r1_new &= ~(BM_BCH_FLASH0LAYOUT0_NBLOCKS |
			BM_BCH_FLASH0LAYOUT0_META_SIZE);
	r1_new |= BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1)
			| BF_BCH_FLASH0LAYOUT0_META_SIZE(meta);
	writel(r1_new, bch_regs + HW_BCH_FLASH0LAYOUT0);

	r2_new &= ~BM_BCH_FLASH0LAYOUT1_PAGE_SIZE;
	r2_new |= BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size);
	writel(r2_new, bch_regs + HW_BCH_FLASH0LAYOUT1);

	geo->ecc_chunk_count = n;
	geo->payload_size = n * size;
	geo->page_size = page_size;
	geo->auxiliary_status_offset = ALIGN(meta, 4);

	dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
		page, offs, len, col, first, n, page_size);

	/* Read the subpage now */
	this->swap_block_mark = false;
	max_bitflips = gpmi_ecc_read_page(mtd, chip, buf, 0, page);

	/* Restore the old environment */
	writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
	writel(r2_old, bch_regs + HW_BCH_FLASH0LAYOUT1);
	this->bch_geometry = old_geo;
	this->swap_block_mark = old_swap_block_mark;

	return max_bitflips;
}
static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				const uint8_t *buf, int oob_required, int page)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	const void *payload_virt;
	dma_addr_t payload_phys;
	const void *auxiliary_virt;
	dma_addr_t auxiliary_phys;
	int        ret;

	dev_dbg(this->dev, "ecc write page.\n");
	if (this->swap_block_mark) {
		/*
		 * If control arrives here, we're doing block mark swapping.
		 * Since we can't modify the caller's buffers, we must copy them
		 * into our own.
		 */
		memcpy(this->payload_virt, buf, mtd->writesize);
		payload_virt = this->payload_virt;
		payload_phys = this->payload_phys;

		memcpy(this->auxiliary_virt, chip->oob_poi,
				nfc_geo->auxiliary_size);
		auxiliary_virt = this->auxiliary_virt;
		auxiliary_phys = this->auxiliary_phys;

		/* Handle block mark swapping. */
		block_mark_swapping(this,
				(void *)payload_virt, (void *)auxiliary_virt);
	} else {
		/*
		 * If control arrives here, we're not doing block mark swapping,
		 * so we can try to use the caller's buffers.
		 */
		ret = send_page_prepare(this,
				buf, mtd->writesize,
				this->payload_virt, this->payload_phys,
				nfc_geo->payload_size,
				&payload_virt, &payload_phys);
		if (ret) {
			dev_err(this->dev, "Inadequate payload DMA buffer\n");
			return ret;
		}

		ret = send_page_prepare(this,
				chip->oob_poi, mtd->oobsize,
				this->auxiliary_virt, this->auxiliary_phys,
				nfc_geo->auxiliary_size,
				&auxiliary_virt, &auxiliary_phys);
		if (ret) {
			dev_err(this->dev, "Inadequate auxiliary DMA buffer\n");
			goto exit_auxiliary;
		}
	}

	/* Ask the NFC. */
	ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
	if (ret)
		dev_err(this->dev, "Error in ECC-based write: %d\n", ret);

	if (!this->swap_block_mark) {
		send_page_end(this, chip->oob_poi, mtd->oobsize,
				this->auxiliary_virt, this->auxiliary_phys,
				nfc_geo->auxiliary_size,
				auxiliary_virt, auxiliary_phys);
exit_auxiliary:
		send_page_end(this, buf, mtd->writesize,
				this->payload_virt, this->payload_phys,
				nfc_geo->payload_size,
				payload_virt, payload_phys);
	}

	return 0;
}
/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and which
 *    are OOB. This gives the caller a way to see the actual, physical bytes
 *    in the page, without the distortions applied by our ECC engine.
 *
 * What we do for this specific read operation depends on two questions:
 *
 * 1) Are we doing a "raw" read, or an ECC-based read?
 *
 * 2) Are we using block mark swapping or transcription?
 *
 * There are four cases, illustrated by the following Karnaugh map:
 *
 *                    |           Raw           |         ECC-based       |
 *       -------------+-------------------------+-------------------------+
 *                    | Read the conventional   |                         |
 *                    | OOB at the end of the   |                         |
 *       Swapping     | page and return it. It  |                         |
 *                    | contains exactly what   |                         |
 *                    | we want.                | Read the block mark and |
 *       -------------+-------------------------+ return it in a buffer   |
 *                    | Read the conventional   | full of set bits.       |
 *                    | OOB at the end of the   |                         |
 *                    | page and also the block |                         |
 *       Transcribing | mark in the metadata.   |                         |
 *                    | Copy the block mark     |                         |
 *                    | into the first byte of  |                         |
 *                    | the OOB.                |                         |
 *       -------------+-------------------------+-------------------------+
 *
 * Note that we break rule #4 in the Transcribing/Raw case because we're not
 * giving an accurate view of the actual, physical bytes in the page (we're
 * overwriting the block mark). That's OK because it's more important to follow
 * rule #3.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
 * easy. When reading a page, for example, the NAND Flash MTD code calls our
 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
 * ECC-based or raw view of the page is implicit in which function it calls
 * (there is a similar pair of ECC-based/raw functions for writing).
 */
static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);

	dev_dbg(this->dev, "page number is %d\n", page);
	/* clear the OOB buffer */
	memset(chip->oob_poi, ~0, mtd->oobsize);

	/* Read out the conventional OOB. */
	chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	/*
	 * Now, we want to make sure the block mark is correct. In the
	 * non-transcribing case (!GPMI_IS_MX23()), we already have it.
	 * Otherwise, we need to explicitly read it.
	 */
	if (GPMI_IS_MX23(this)) {
		/* Read the block mark into the first byte of the OOB buffer. */
		chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
		chip->oob_poi[0] = chip->read_byte(mtd);
	}

	return 0;
}
static int
gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
	struct nand_oobfree *of = mtd->ecclayout->oobfree;
	int status;

	/* Do we have available oob area? */
	if (!of->length)
		return -EPERM;

	if (!nand_is_slc(chip))
		return -EPERM;

	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of->offset, page);
	chip->write_buf(mtd, chip->oob_poi + of->offset, of->length);
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	status = chip->waitfunc(mtd, chip);
	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
/*
 * This function reads a NAND page without involving the ECC engine (no HW
 * ECC correction).
 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
 * inline (interleaved with payload DATA), and does not align data chunks on
 * byte boundaries.
 * We thus need to take care moving the payload data and ECC bits stored in the
 * page into the provided buffers, which is why we're using gpmi_copy_bits.
 *
 * See set_geometry_by_ecc_info inline comments to have a full description
 * of the layout used by the GPMI controller.
 */
static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
				  struct nand_chip *chip, uint8_t *buf,
				  int oob_required, int page)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	int eccsize = nfc_geo->ecc_chunk_size;
	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
	u8 *tmp_buf = this->raw_buffer;
	size_t src_bit_off;
	size_t oob_bit_off;
	size_t oob_byte_off;
	uint8_t *oob = chip->oob_poi;
	int step;

	chip->read_buf(mtd, tmp_buf,
		       mtd->writesize + mtd->oobsize);

	/*
	 * If required, swap the bad block marker and the data stored in the
	 * metadata section, so that we don't wrongly consider a block as bad.
	 *
	 * See the layout description for a detailed explanation on why this
	 * is needed.
	 */
	if (this->swap_block_mark) {
		u8 swap = tmp_buf[0];

		tmp_buf[0] = tmp_buf[mtd->writesize];
		tmp_buf[mtd->writesize] = swap;
	}

	/*
	 * Copy the metadata section into the oob buffer (this section is
	 * guaranteed to be aligned on a byte boundary).
	 */
	if (oob_required)
		memcpy(oob, tmp_buf, nfc_geo->metadata_size);

	oob_bit_off = nfc_geo->metadata_size * 8;
	src_bit_off = oob_bit_off;

	/* Extract interleaved payload data and ECC bits */
	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
		if (buf)
			gpmi_copy_bits(buf, step * eccsize * 8,
				       tmp_buf, src_bit_off,
				       eccsize * 8);
		src_bit_off += eccsize * 8;

		/* Align last ECC block to align a byte boundary */
		if (step == nfc_geo->ecc_chunk_count - 1 &&
		    (oob_bit_off + eccbits) % 8)
			eccbits += 8 - ((oob_bit_off + eccbits) % 8);

		if (oob_required)
			gpmi_copy_bits(oob, oob_bit_off,
				       tmp_buf, src_bit_off,
				       eccbits);

		src_bit_off += eccbits;
		oob_bit_off += eccbits;
	}

	oob_byte_off = oob_bit_off / 8;

	if (oob_byte_off < mtd->oobsize)
		memcpy(oob + oob_byte_off,
		       tmp_buf + mtd->writesize + oob_byte_off,
		       mtd->oobsize - oob_byte_off);

	return 0;
}
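/*
 * Why the bit-level copies above are needed, with hypothetical numbers:
 * each 512-byte chunk is followed by gf_len * ecc_strength parity bits,
 * e.g. GF13 with strength 2 gives 26 bits (3 bytes and 2 bits), so every
 * chunk after the first starts at a non-byte-aligned offset in the page.
 * gpmi_copy_bits() handles those shifted, non-byte-aligned copies.
 */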
/*
 * This function writes a NAND page without involving the ECC engine (no HW
 * ECC generation).
 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
 * inline (interleaved with payload DATA), and does not align data chunks on
 * byte boundaries.
 * We thus need to take care moving the OOB area at the right place in the
 * final page, which is why we're using gpmi_copy_bits.
 *
 * See set_geometry_by_ecc_info inline comments to have a full description
 * of the layout used by the GPMI controller.
 */
static int gpmi_ecc_write_page_raw(struct mtd_info *mtd,
				   struct nand_chip *chip,
				   const uint8_t *buf,
				   int oob_required, int page)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	int eccsize = nfc_geo->ecc_chunk_size;
	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
	u8 *tmp_buf = this->raw_buffer;
	uint8_t *oob = chip->oob_poi;
	size_t dst_bit_off;
	size_t oob_bit_off;
	size_t oob_byte_off;
	int step;

	/*
	 * Initialize all bits to 1 in case we don't have a buffer for the
	 * payload or oob data in order to leave unspecified bits of data
	 * to their initial state.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);

	/*
	 * First copy the metadata section (stored in oob buffer) at the
	 * beginning of the page, as imposed by the GPMI layout.
	 */
	memcpy(tmp_buf, oob, nfc_geo->metadata_size);
	oob_bit_off = nfc_geo->metadata_size * 8;
	dst_bit_off = oob_bit_off;

	/* Interleave payload data and ECC bits */
	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
		if (buf)
			gpmi_copy_bits(tmp_buf, dst_bit_off,
				       buf, step * eccsize * 8, eccsize * 8);
		dst_bit_off += eccsize * 8;

		/* Align last ECC block to align a byte boundary */
		if (step == nfc_geo->ecc_chunk_count - 1 &&
		    (oob_bit_off + eccbits) % 8)
			eccbits += 8 - ((oob_bit_off + eccbits) % 8);

		if (oob_required)
			gpmi_copy_bits(tmp_buf, dst_bit_off,
				       oob, oob_bit_off, eccbits);

		dst_bit_off += eccbits;
		oob_bit_off += eccbits;
	}

	oob_byte_off = oob_bit_off / 8;

	if (oob_required && oob_byte_off < mtd->oobsize)
		memcpy(tmp_buf + mtd->writesize + oob_byte_off,
		       oob + oob_byte_off, mtd->oobsize - oob_byte_off);

	/*
	 * If required, swap the bad block marker and the first byte of the
	 * metadata section, so that we don't modify the bad block marker.
	 *
	 * See the layout description for a detailed explanation on why this
	 * is needed.
	 */
	if (this->swap_block_mark) {
		u8 swap = tmp_buf[0];

		tmp_buf[0] = tmp_buf[mtd->writesize];
		tmp_buf[mtd->writesize] = swap;
	}

	chip->write_buf(mtd, tmp_buf, mtd->writesize + mtd->oobsize);

	return 0;
}
static int gpmi_ecc_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 int page)
{
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

	return gpmi_ecc_read_page_raw(mtd, chip, NULL, 1, page);
}
static int gpmi_ecc_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
				  int page)
{
	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);

	return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1, page);
}
static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	int ret = 0;
	uint8_t *block_mark;
	int column, page, status, chipnr;

	chipnr = (int)(ofs >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);

	column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;

	/* Write the block mark. */
	block_mark = this->data_buffer_dma;
	block_mark[0] = 0; /* bad block marker */

	/* Shift to get page */
	page = (int)(ofs >> chip->page_shift);

	chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
	chip->write_buf(mtd, block_mark, 1);
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	status = chip->waitfunc(mtd, chip);
	if (status & NAND_STATUS_FAIL)
		ret = -EIO;

	chip->select_chip(mtd, -1);

	return ret;
}
static int nand_boot_set_geometry(struct gpmi_nand_data *this)
{
	struct boot_rom_geometry *geometry = &this->rom_geometry;

	/*
	 * Set the boot block stride size.
	 *
	 * In principle, we should be reading this from the OTP bits, since
	 * that's where the ROM is going to get it. In fact, we don't have any
	 * way to read the OTP bits, so we go with the default and hope for the
	 * best.
	 */
	geometry->stride_size_in_pages = 64;

	/*
	 * Set the search area stride exponent.
	 *
	 * In principle, we should be reading this from the OTP bits, since
	 * that's where the ROM is going to get it. In fact, we don't have any
	 * way to read the OTP bits, so we go with the default and hope for the
	 * best.
	 */
	geometry->search_area_stride_exponent = 2;

	return 0;
}
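/*
 * With these defaults the ROM search area works out to 1 << 2 = 4 strides
 * of 64 pages each, i.e. the fingerprint scan below covers the first
 * 256 pages of the chip.
 */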
static const char *fingerprint = "STMP";
static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
{
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct device *dev = this->dev;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int search_area_size_in_strides;
	unsigned int stride;
	unsigned int page;
	uint8_t *buffer = chip->buffers->databuf;
	int saved_chip_number;
	int found_an_ncb_fingerprint = false;

	/* Compute the number of strides in a search area. */
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;

	saved_chip_number = this->current_chip;
	chip->select_chip(mtd, 0);

	/*
	 * Loop through the first search area, looking for the NCB fingerprint.
	 */
	dev_dbg(dev, "Scanning for an NCB fingerprint...\n");

	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* Compute the page addresses. */
		page = stride * rom_geo->stride_size_in_pages;

		dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);

		/*
		 * Read the NCB fingerprint. The fingerprint is four bytes long
		 * and starts in the 12th byte of the page.
		 */
		chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
		chip->read_buf(mtd, buffer, strlen(fingerprint));

		/* Look for the fingerprint. */
		if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
			found_an_ncb_fingerprint = true;
			break;
		}
	}

	chip->select_chip(mtd, saved_chip_number);

	if (found_an_ncb_fingerprint)
		dev_dbg(dev, "\tFound a fingerprint\n");
	else
		dev_dbg(dev, "\tNo fingerprint found\n");
	return found_an_ncb_fingerprint;
}
/* Writes a transcription stamp. */
static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_size_in_pages;
	unsigned int search_area_size_in_strides;
	unsigned int search_area_size_in_pages;
	unsigned int search_area_size_in_blocks;
	unsigned int block;
	unsigned int stride;
	unsigned int page;
	uint8_t *buffer = chip->buffers->databuf;
	int saved_chip_number;
	int status;

	/* Compute the search area geometry. */
	block_size_in_pages = mtd->erasesize / mtd->writesize;
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
	search_area_size_in_pages = search_area_size_in_strides *
					rom_geo->stride_size_in_pages;
	search_area_size_in_blocks =
		  (search_area_size_in_pages + (block_size_in_pages - 1)) /
				    block_size_in_pages;

	dev_dbg(dev, "Search Area Geometry :\n");
	dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
	dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
	dev_dbg(dev, "\tin Pages  : %u\n", search_area_size_in_pages);

	/* Select chip 0. */
	saved_chip_number = this->current_chip;
	chip->select_chip(mtd, 0);

	/* Loop over blocks in the first search area, erasing them. */
	dev_dbg(dev, "Erasing the search area...\n");

	for (block = 0; block < search_area_size_in_blocks; block++) {
		/* Compute the page address. */
		page = block * block_size_in_pages;

		/* Erase this block. */
		dev_dbg(dev, "\tErasing block 0x%x\n", block);
		chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
		chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);

		/* Wait for the erase to finish. */
		status = chip->waitfunc(mtd, chip);
		if (status & NAND_STATUS_FAIL)
			dev_err(dev, "[%s] Erase failed.\n", __func__);
	}

	/* Write the NCB fingerprint into the page buffer. */
	memset(buffer, ~0, mtd->writesize);
	memcpy(buffer + 12, fingerprint, strlen(fingerprint));

	/* Loop through the first search area, writing NCB fingerprints. */
	dev_dbg(dev, "Writing NCB fingerprints...\n");
	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* Compute the page addresses. */
		page = stride * rom_geo->stride_size_in_pages;

		/* Write the first page of the current stride. */
		dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
		chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
		chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

		/* Wait for the write to finish. */
		status = chip->waitfunc(mtd, chip);
		if (status & NAND_STATUS_FAIL)
			dev_err(dev, "[%s] Write failed.\n", __func__);
	}

	/* Deselect chip 0. */
	chip->select_chip(mtd, saved_chip_number);

	return 0;
}
static int mx23_boot_init(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_count;
	unsigned int block;
	int     chipnr;
	int     page;
	loff_t  byte;
	uint8_t block_mark;
	int     ret = 0;

	/*
	 * If control arrives here, we can't use block mark swapping, which
	 * means we're forced to use transcription. First, scan for the
	 * transcription stamp. If we find it, then we don't have to do
	 * anything -- the block marks are already transcribed.
	 */
	if (mx23_check_transcription_stamp(this))
		return 0;

	/*
	 * If control arrives here, we couldn't find a transcription stamp, so
	 * we presume the block marks are in the conventional location.
	 */
	dev_dbg(dev, "Transcribing bad block marks...\n");

	/* Compute the number of blocks in the entire medium. */
	block_count = chip->chipsize >> chip->phys_erase_shift;

	/*
	 * Loop over all the blocks in the medium, transcribing block marks as
	 * we go.
	 */
	for (block = 0; block < block_count; block++) {
		/*
		 * Compute the chip, page and byte addresses for this block's
		 * conventional mark.
		 */
		chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
		page = block << (chip->phys_erase_shift - chip->page_shift);
		byte = block <<  chip->phys_erase_shift;

		/* Send the command to read the conventional block mark. */
		chip->select_chip(mtd, chipnr);
		chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		block_mark = chip->read_byte(mtd);
		chip->select_chip(mtd, -1);

		/*
		 * Check if the block is marked bad. If so, we need to mark it
		 * again, but this time the result will be a mark in the
		 * location where we transcribe block marks.
		 */
		if (block_mark != 0xff) {
			dev_dbg(dev, "Transcribing mark in block %u\n", block);
			ret = chip->block_markbad(mtd, byte);
			if (ret)
				dev_err(dev,
					"Failed to mark block bad with ret %d\n",
					ret);
		}
	}

	/* Write the stamp that indicates we've transcribed the block marks. */
	mx23_write_transcription_stamp(this);
	return 0;
}
static int nand_boot_init(struct gpmi_nand_data *this)
{
	nand_boot_set_geometry(this);

	/* This is ROM arch-specific initialization before the BBT scanning. */
	if (GPMI_IS_MX23(this))
		return mx23_boot_init(this);
	return 0;
}
static int gpmi_set_geometry(struct gpmi_nand_data *this)
{
	int ret;

	/* Free the temporary DMA memory for reading ID. */
	gpmi_free_dma_buffer(this);

	/* Set up the NFC geometry which is used by BCH. */
	ret = bch_set_geometry(this);
	if (ret) {
		dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
		return ret;
	}

	/* Alloc the new DMA buffers according to the pagesize and oobsize */
	return gpmi_alloc_dma_buffer(this);
}
static void gpmi_nand_exit(struct gpmi_nand_data *this)
{
	nand_release(nand_to_mtd(&this->nand));
	gpmi_free_dma_buffer(this);
}
static int gpmi_init_last(struct gpmi_nand_data *this)
{
	struct nand_chip *chip = &this->nand;
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct bch_geometry *bch_geo = &this->bch_geometry;
	int ret;

	/* Set up the medium geometry */
	ret = gpmi_set_geometry(this);
	if (ret)
		return ret;

	/* Init the nand_ecc_ctrl{} */
	ecc->read_page	= gpmi_ecc_read_page;
	ecc->write_page	= gpmi_ecc_write_page;
	ecc->read_oob	= gpmi_ecc_read_oob;
	ecc->write_oob	= gpmi_ecc_write_oob;
	ecc->read_page_raw = gpmi_ecc_read_page_raw;
	ecc->write_page_raw = gpmi_ecc_write_page_raw;
	ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
	ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
	ecc->mode	= NAND_ECC_HW;
	ecc->size	= bch_geo->ecc_chunk_size;
	ecc->strength	= bch_geo->ecc_strength;
	ecc->layout	= &gpmi_hw_ecclayout;

	/*
	 * We only enable the subpage read when:
	 *  (1) the chip is imx6, and
	 *  (2) the size of the ECC parity is byte aligned.
	 */
	if (GPMI_IS_MX6(this) &&
		((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
		ecc->read_subpage = gpmi_ecc_read_subpage;
		chip->options |= NAND_SUBPAGE_READ;
	}

	/*
	 * Can we enable the extra features, such as EDO or Sync mode?
	 *
	 * We do not check the return value here. That means that if we fail
	 * to enable the extra features, we can still run in the normal way.
	 */
	gpmi_extra_init(this);

	return 0;
}
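/*
 * Example of the subpage-read condition above (hypothetical geometry):
 * GF13 with ECC strength 8 gives 13 * 8 = 104 parity bits = 13 whole
 * bytes, so (gf_len * ecc_strength) % 8 == 0 and subpage reads are
 * enabled; GF13 with strength 2 gives 26 bits, which is not byte
 * aligned, so they stay disabled.
 */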
static int gpmi_nand_init(struct gpmi_nand_data *this)
{
	struct nand_chip *chip = &this->nand;
	struct mtd_info  *mtd = nand_to_mtd(chip);
	int ret;

	/* init current chip */
	this->current_chip	= -1;

	/* init the MTD data structures */
	mtd->name		= "gpmi-nand";
	mtd->dev.parent		= this->dev;

	/* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
	nand_set_controller_data(chip, this);
	nand_set_flash_node(chip, this->pdev->dev.of_node);
	chip->select_chip	= gpmi_select_chip;
	chip->cmd_ctrl		= gpmi_cmd_ctrl;
	chip->dev_ready		= gpmi_dev_ready;
	chip->read_byte		= gpmi_read_byte;
	chip->read_buf		= gpmi_read_buf;
	chip->write_buf		= gpmi_write_buf;
	chip->badblock_pattern	= &gpmi_bbt_descr;
	chip->block_markbad	= gpmi_block_markbad;
	chip->options		|= NAND_NO_SUBPAGE_WRITE;

	/* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
	this->swap_block_mark = !GPMI_IS_MX23(this);

	if (of_get_nand_on_flash_bbt(this->dev->of_node)) {
		chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;

		if (of_property_read_bool(this->dev->of_node,
						"fsl,no-blockmark-swap"))
			this->swap_block_mark = false;
	}
	dev_dbg(this->dev, "Blockmark swapping %sabled\n",
		this->swap_block_mark ? "en" : "dis");

	/*
	 * Allocate a temporary DMA buffer for reading ID in the
	 * nand_scan_ident().
	 */
	this->bch_geometry.payload_size = 1024;
	this->bch_geometry.auxiliary_size = 128;
	ret = gpmi_alloc_dma_buffer(this);
	if (ret)
		goto err_out;

	ret = nand_scan_ident(mtd, GPMI_IS_MX6(this) ? 2 : 1, NULL);
	if (ret)
		goto err_out;

	ret = gpmi_init_last(this);
	if (ret)
		goto err_out;

	chip->options |= NAND_SKIP_BBTSCAN;
	ret = nand_scan_tail(mtd);
	if (ret)
		goto err_out;

	ret = nand_boot_init(this);
	if (ret)
		goto err_out;
	ret = chip->scan_bbt(mtd);
	if (ret)
		goto err_out;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_out;
	return 0;

err_out:
	gpmi_nand_exit(this);
	return ret;
}
static const struct of_device_id gpmi_nand_id_table[] = {
	{
		.compatible = "fsl,imx23-gpmi-nand",
		.data = &gpmi_devdata_imx23,
	}, {
		.compatible = "fsl,imx28-gpmi-nand",
		.data = &gpmi_devdata_imx28,
	}, {
		.compatible = "fsl,imx6q-gpmi-nand",
		.data = &gpmi_devdata_imx6q,
	}, {
		.compatible = "fsl,imx6sx-gpmi-nand",
		.data = &gpmi_devdata_imx6sx,
	}, {}
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
static int gpmi_nand_probe(struct platform_device *pdev)
{
	struct gpmi_nand_data *this;
	const struct of_device_id *of_id;
	int ret;

	this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
	if (!this)
		return -ENOMEM;

	of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
	if (of_id) {
		this->devdata = of_id->data;
	} else {
		dev_err(&pdev->dev, "Failed to find the right device id.\n");
		return -ENODEV;
	}

	platform_set_drvdata(pdev, this);
	this->pdev = pdev;
	this->dev  = &pdev->dev;

	ret = acquire_resources(this);
	if (ret)
		goto exit_acquire_resources;

	ret = init_hardware(this);
	if (ret)
		goto exit_nfc_init;

	ret = gpmi_nand_init(this);
	if (ret)
		goto exit_nfc_init;

	dev_info(this->dev, "driver registered.\n");

	return 0;

exit_nfc_init:
	release_resources(this);
exit_acquire_resources:

	return ret;
}
static int gpmi_nand_remove(struct platform_device *pdev)
{
	struct gpmi_nand_data *this = platform_get_drvdata(pdev);

	gpmi_nand_exit(this);
	release_resources(this);
	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int gpmi_pm_suspend(struct device *dev)
{
	struct gpmi_nand_data *this = dev_get_drvdata(dev);

	release_dma_channels(this);
	return 0;
}

static int gpmi_pm_resume(struct device *dev)
{
	struct gpmi_nand_data *this = dev_get_drvdata(dev);
	int ret;

	ret = acquire_dma_channels(this);
	if (ret < 0)
		return ret;

	/* re-init the GPMI registers */
	this->flags &= ~GPMI_TIMING_INIT_OK;
	ret = gpmi_init(this);
	if (ret) {
		dev_err(this->dev, "Error setting GPMI : %d\n", ret);
		return ret;
	}

	/* re-init the BCH registers */
	ret = bch_set_geometry(this);
	if (ret) {
		dev_err(this->dev, "Error setting BCH : %d\n", ret);
		return ret;
	}

	/* re-init others */
	gpmi_extra_init(this);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops gpmi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
};

static struct platform_driver gpmi_nand_driver = {
	.driver = {
		.name = "gpmi-nand",
		.pm = &gpmi_pm_ops,
		.of_match_table = gpmi_nand_id_table,
	},
	.probe   = gpmi_nand_probe,
	.remove  = gpmi_nand_remove,
};
module_platform_driver(gpmi_nand_driver);

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
MODULE_LICENSE("GPL");