// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2009 - Maxim Levitsky
 * SmartMedia/xD translation layer
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/hdreg.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sysfs.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/mtd/nand-ecc-sw-hamming.h>
#include "nand/raw/sm_common.h"
#include "sm_ftl.h"
static struct workqueue_struct *cache_flush_workqueue;

static int cache_timeout = 1000;
module_param(cache_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_timeout,
	"Timeout (in ms) for cache flush (1000 ms default)");

static int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");

/* ------------------- sysfs attributes ---------------------------------- */
struct sm_sysfs_attribute {
	struct device_attribute dev_attr;
	char *data;
	int len;
};

static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct sm_sysfs_attribute *sm_attr =
		container_of(attr, struct sm_sysfs_attribute, dev_attr);

	strncpy(buf, sm_attr->data, sm_attr->len);
	return sm_attr->len;
}

#define NUM_ATTRIBUTES 1
#define SM_CIS_VENDOR_OFFSET 0x59
static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
{
	struct attribute_group *attr_group;
	struct attribute **attributes;
	struct sm_sysfs_attribute *vendor_attribute;
	char *vendor;

	vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
			  SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL);
	if (!vendor)
		goto error1;

	/* Initialize sysfs attributes */
	vendor_attribute =
		kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
	if (!vendor_attribute)
		goto error2;

	sysfs_attr_init(&vendor_attribute->dev_attr.attr);

	vendor_attribute->data = vendor;
	vendor_attribute->len = strlen(vendor);
	vendor_attribute->dev_attr.attr.name = "vendor";
	vendor_attribute->dev_attr.attr.mode = S_IRUGO;
	vendor_attribute->dev_attr.show = sm_attr_show;

	/* Create array of pointers to the attributes */
	attributes = kcalloc(NUM_ATTRIBUTES + 1, sizeof(struct attribute *),
			     GFP_KERNEL);
	if (!attributes)
		goto error3;
	attributes[0] = &vendor_attribute->dev_attr.attr;

	/* Finally create the attribute group */
	attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
	if (!attr_group)
		goto error4;
	attr_group->attrs = attributes;
	return attr_group;
error4:
	kfree(attributes);
error3:
	kfree(vendor_attribute);
error2:
	kfree(vendor);
error1:
	return NULL;
}
static void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
{
	struct attribute **attributes = ftl->disk_attributes->attrs;
	int i;

	for (i = 0; attributes[i]; i++) {

		struct device_attribute *dev_attr = container_of(attributes[i],
			struct device_attribute, attr);

		struct sm_sysfs_attribute *sm_attr =
			container_of(dev_attr,
				struct sm_sysfs_attribute, dev_attr);

		kfree(sm_attr->data);
		kfree(sm_attr);
	}

	kfree(ftl->disk_attributes->attrs);
	kfree(ftl->disk_attributes);
}

/* ----------------------- oob helpers -------------------------------------- */

static int sm_get_lba(uint8_t *lba)
{
	/* check fixed bits */
	if ((lba[0] & 0xF8) != 0x10)
		return -2;

	/* check parity - endianness doesn't matter */
	if (hweight16(*(uint16_t *)lba) & 1)
		return -2;

	return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
}

/*
 * Read LBA associated with block
 * returns -1, if block is erased
 * returns -2 if error happens
 */
static int sm_read_lba(struct sm_oob *oob)
{
	static const uint32_t erased_pattern[4] = {
		0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };

	uint16_t lba_test;
	int lba;

	/* First test for erased block */
	if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
		return -1;

	/* Now check if both copies of the LBA differ too much */
	lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t *)oob->lba_copy2;
	if (lba_test && !is_power_of_2(lba_test))
		return -2;

	/* And read it */
	lba = sm_get_lba(oob->lba_copy1);

	if (lba == -2)
		lba = sm_get_lba(oob->lba_copy2);

	return lba;
}
static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
{
	uint8_t tmp[2];

	WARN_ON(lba >= 1000);

	tmp[0] = 0x10 | ((lba >> 7) & 0x07);
	tmp[1] = (lba << 1) & 0xFF;

	if (hweight16(*(uint16_t *)tmp) & 0x01)
		tmp[1] |= 1;

	oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
	oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
}
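
/*
 * Note on the on-media LBA format handled by sm_get_lba()/sm_write_lba()
 * above: the 10-bit logical block number is stored in two bytes, with the top
 * three bits in the low bits of byte 0 (under a fixed 0x10 prefix in the top
 * five bits) and the low seven bits in bits 7..1 of byte 1; bit 0 of byte 1 is
 * a parity bit that makes the whole 16-bit field even-parity. For example,
 * LBA 5 encodes as 0x10 0x0B (0x0A from 5 << 1, plus the parity bit), which
 * sm_get_lba() decodes back to 5. The field is stored twice (lba_copy1 and
 * lba_copy2) so a corrupted copy can be detected.
 */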

/* Make offset from parts */
static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
{
	WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
	WARN_ON(zone < 0 || zone >= ftl->zone_count);
	WARN_ON(block >= ftl->zone_size);
	WARN_ON(boffset >= ftl->block_size);

	if (block == -1)
		return -1;

	return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
}

/* Breaks offset into parts */
static void sm_break_offset(struct sm_ftl *ftl, loff_t loffset,
			    int *zone, int *block, int *boffset)
{
	u64 offset = loffset;
	*boffset = do_div(offset, ftl->block_size);
	*block = do_div(offset, ftl->max_lba);
	*zone = offset >= ftl->zone_count ? -1 : offset;
}
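
/*
 * The two helpers above are not inverses of each other: sm_break_offset()
 * splits a *logical* byte offset into (zone, logical block, byte offset in
 * block), dividing by ftl->max_lba logical blocks per zone, while
 * sm_mkoffset() builds a *physical* mtd offset from (zone, physical block,
 * byte offset in block), using SM_MAX_ZONE_SIZE as the per-zone block stride.
 * The mapping from logical to physical blocks in between is done through
 * zone->lba_to_phys_table.
 */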

/* ---------------------- low level IO ------------------------------------- */

static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
{
	bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
	uint8_t ecc[3];

	ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, ecc, sm_order);
	if (ecc_sw_hamming_correct(buffer, ecc, oob->ecc1, SM_SMALL_PAGE,
				   sm_order) < 0)
		return -EIO;

	buffer += SM_SMALL_PAGE;

	ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, ecc, sm_order);
	if (ecc_sw_hamming_correct(buffer, ecc, oob->ecc2, SM_SMALL_PAGE,
				   sm_order) < 0)
		return -EIO;

	return 0;
}
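
/*
 * On small-page NAND each 512-byte sector is handled as two SM_SMALL_PAGE
 * halves, and each half carries its own software Hamming ECC in the OOB
 * (oob->ecc1 for the first half, oob->ecc2 for the second).
 * sm_correct_sector() recomputes the ECC over the data just read and lets the
 * Hamming helper repair what it can in place; anything it cannot fix is
 * reported as an error and dealt with by the read retry logic in
 * sm_read_sector() below.
 */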

/* Reads a sector + oob*/
static int sm_read_sector(struct sm_ftl *ftl,
			  int zone, int block, int boffset,
			  uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_info *mtd = ftl->trans->mtd;
	struct mtd_oob_ops ops = { };
	struct sm_oob tmp_oob;
	int ret = -EIO;
	int try = 0;

	/* FTL can contain -1 entries that are by default filled with bits */
	if (block == -1) {
		if (buffer)
			memset(buffer, 0xFF, SM_SECTOR_SIZE);
		return 0;
	}

	/* User might not need the oob, but we do for data verification */
	if (!oob)
		oob = &tmp_oob;

	ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;

again:
	if (try++) {
		/* Avoid infinite recursion on CIS reads, sm_recheck_media
		 * won't help anyway
		 */
		if (zone == 0 && block == ftl->cis_block && boffset ==
			ftl->cis_boffset)
			return ret;

		/* Test if media is stable */
		if (try == 3 || sm_recheck_media(ftl))
			return ret;
	}

	/* Unfortunately, oob read will _always_ succeed,
	 * despite card removal.....
	 */
	ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Test for unknown errors */
	if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
		dbg("read of block %d at zone %d, failed due to error (%d)",
			block, zone, ret);
		goto again;
	}

	/* Do a basic test on the oob, to guard against returned garbage */
	if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
		goto again;

	/* This should never happen, unless there is a bug in the mtd driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	if (!buffer)
		return 0;

	/* Test if sector marked as bad */
	if (!sm_sector_valid(oob)) {
		dbg("read of block %d at zone %d, failed because it is marked as bad",
			block, zone);
		goto again;
	}

	/* Test ECC */
	if (mtd_is_eccerr(ret) ||
		(ftl->smallpagenand && sm_correct_sector(buffer, oob))) {

		dbg("read of block %d at zone %d, failed due to ECC error",
			block, zone);
		goto again;
	}

	return 0;
}
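
/*
 * Failed reads above are retried (up to three attempts in total, tracked by
 * the 'try' counter), with a media recheck between attempts. Reads of the CIS
 * sector itself are never retried through sm_recheck_media(), since that
 * function reads the CIS and would recurse back into sm_read_sector().
 */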

/* Writes a sector to media */
static int sm_write_sector(struct sm_ftl *ftl,
			   int zone, int block, int boffset,
			   uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_oob_ops ops = { };
	struct mtd_info *mtd = ftl->trans->mtd;
	int ret;

	BUG_ON(ftl->readonly);

	if (zone == 0 && (block == ftl->cis_block || block == 0)) {
		dbg("attempted to write the CIS!");
		return -EIO;
	}

	if (ftl->unstable)
		return -EIO;

	ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;

	ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Now we assume that hardware will catch write bitflip errors */

	if (ret) {
		dbg("write to block %d at zone %d, failed with error %d",
			block, zone, ret);

		sm_recheck_media(ftl);
		return ret;
	}

	/* This should never happen, unless there is a bug in the driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	return 0;
}

/* ------------------------ block IO ------------------------------------- */

/* Write a block using data and lba, and invalid sector bitmap */
static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
			  int zone, int block, int lba,
			  unsigned long invalid_bitmap)
{
	bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
	struct sm_oob oob;
	int boffset;
	int retry = 0;

	/* Initialize the oob with requested values */
	memset(&oob, 0xFF, SM_OOB_SIZE);
	sm_write_lba(&oob, lba);

restart:
	if (ftl->unstable)
		return -EIO;

	for (boffset = 0; boffset < ftl->block_size;
			boffset += SM_SECTOR_SIZE) {

		oob.data_status = 0xFF;

		if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {

			sm_printk("sector %d of block at LBA %d of zone %d"
				" couldn't be read, marking it as invalid",
				boffset / SM_SECTOR_SIZE, lba, zone);

			oob.data_status = 0;
		}

		if (ftl->smallpagenand) {
			ecc_sw_hamming_calculate(buf + boffset,
					SM_SMALL_PAGE, oob.ecc1, sm_order);

			ecc_sw_hamming_calculate(buf + boffset + SM_SMALL_PAGE,
					SM_SMALL_PAGE, oob.ecc2, sm_order);
		}
		if (!sm_write_sector(ftl, zone, block, boffset,
				     buf + boffset, &oob))
			continue;

		if (!retry) {

			/* If write fails, try to erase the block */
			/* This is safe, because we never write in blocks
			 * that contain valuable data.
			 * This is intended to repair block that are marked
			 * as erased, but that isn't fully erased
			 */

			if (sm_erase_block(ftl, zone, block, 0))
				return -EIO;

			retry = 1;
			goto restart;
		} else {
			sm_mark_block_bad(ftl, zone, block);
			return -EIO;
		}
	}
	return 0;
}

/* Mark whole block at offset 'offs' as bad. */
static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
{
	struct sm_oob oob;
	int boffset;

	memset(&oob, 0xFF, SM_OOB_SIZE);
	oob.block_status = 0xF0;

	if (ftl->unstable)
		return;

	if (sm_recheck_media(ftl))
		return;

	sm_printk("marking block %d of zone %d as bad", block, zone);

	/* We aren't checking the return value, because we don't care */
	/* This also fails on fake xD cards, but I guess these won't expose
	 * any bad blocks till fail completely
	 */
	for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
		sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
}

/*
 * Erase a block within a zone
 * If erase succeeds, it updates free block fifo, otherwise marks block as bad
 */
static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
			  int put_free)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct mtd_info *mtd = ftl->trans->mtd;
	struct erase_info erase;

	erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
	erase.len = ftl->block_size;

	if (ftl->unstable)
		return -EIO;

	BUG_ON(ftl->readonly);

	if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
		sm_printk("attempted to erase the CIS!");
		return -EIO;
	}

	if (mtd_erase(mtd, &erase)) {
		sm_printk("erase of block %d in zone %d failed",
			block, zone_num);
		goto error;
	}

	if (put_free)
		kfifo_in(&zone->free_sectors,
			(const unsigned char *)&block, sizeof(block));

	return 0;
error:
	sm_mark_block_bad(ftl, zone_num, block);
	return -EIO;
}

/* Thoroughly test that block is valid. */
static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
{
	int boffset;
	struct sm_oob oob;
	int lbas[] = { -3, 0, 0, 0 };
	int i = 0;
	int test_lba;

	/* First just check that block doesn't look fishy */
	/* Only blocks that are valid or are sliced in two parts, are
	 * accepted
	 */
	for (boffset = 0; boffset < ftl->block_size;
					boffset += SM_SECTOR_SIZE) {

		/* This shouldn't happen anyway */
		if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
			return -2;

		test_lba = sm_read_lba(&oob);

		if (lbas[i] != test_lba)
			lbas[++i] = test_lba;

		/* If we found three different LBAs, something is fishy */
		if (i == 3)
			return -EIO;
	}

	/* If the block is sliced (partially erased usually) erase it */
	if (i == 2) {
		sm_erase_block(ftl, zone, block, 1);
		return 1;
	}

	return 0;
}
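
/*
 * The lbas[] array above starts with a -3 sentinel and records each new LBA
 * value seen while walking the block, so 'i' ends up counting how many
 * distinct per-sector LBA values the block contains: one value means a
 * consistent block, two means a "sliced" block (typically a partially
 * rewritten or partially erased one) that gets erased here, and three or more
 * is treated as an error.
 */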

/* ----------------- media scanning --------------------------------- */
static const struct chs_entry chs_table[] = {
	{ 128,  500,  16, 32 },
	{ 256,  1000, 16, 32 },
	{ 512,  1015, 32, 63 },
	{ 1024, 985,  33, 63 },
	{ 2048, 985,  33, 63 },
};

static const uint8_t cis_signature[] = {
	0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
};
/* Find out media parameters.
 * This ideally has to be based on nand id, but for now device size is enough
 */
static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
{
	int i;
	int size_in_megs = mtd->size / (1024 * 1024);

	ftl->readonly = mtd->type == MTD_ROM;

	/* Manual settings for very old devices */
	ftl->zone_count = 1;
	ftl->smallpagenand = 0;

	switch (size_in_megs) {
	case 1:
		/* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
		ftl->zone_size = 256;
		ftl->max_lba = 250;
		ftl->block_size = 8 * SM_SECTOR_SIZE;
		ftl->smallpagenand = 1;

		break;
	case 2:
		/* 2 MiB flash SmartMedia (256 byte pages)*/
		if (mtd->writesize == SM_SMALL_PAGE) {
			ftl->zone_size = 512;
			ftl->max_lba = 500;
			ftl->block_size = 8 * SM_SECTOR_SIZE;
			ftl->smallpagenand = 1;
		/* 2 MiB rom SmartMedia */
		} else {

			if (!ftl->readonly)
				return -ENODEV;

			ftl->zone_size = 256;
			ftl->max_lba = 250;
			ftl->block_size = 16 * SM_SECTOR_SIZE;
		}
		break;
	case 4:
		/* 4 MiB flash/rom SmartMedia device */
		ftl->zone_size = 512;
		ftl->max_lba = 500;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
		break;
	case 8:
		/* 8 MiB flash/rom SmartMedia device */
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
		break;
	}

	/* Minimum xD size is 16MiB. Also, all xD cards have standard zone
	 * sizes. SmartMedia cards exist up to 128 MiB and have same layout
	 */
	if (size_in_megs >= 16) {
		ftl->zone_count = size_in_megs / 16;
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 32 * SM_SECTOR_SIZE;
	}

	/* Test for proper write, erase and oob sizes */
	if (mtd->erasesize > ftl->block_size)
		return -ENODEV;

	if (mtd->writesize > SM_SECTOR_SIZE)
		return -ENODEV;

	if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
		return -ENODEV;

	if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
		return -ENODEV;

	/* We use OOB */
	if (!mtd_has_oob(mtd))
		return -ENODEV;

	/* Find geometry information */
	for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
		if (chs_table[i].size == size_in_megs) {
			ftl->cylinders = chs_table[i].cyl;
			ftl->heads = chs_table[i].head;
			ftl->sectors = chs_table[i].sec;
			return 0;
		}
	}

	sm_printk("media has unknown size : %dMiB", size_in_megs);
	ftl->cylinders = 985;
	ftl->heads = 33;
	ftl->sectors = 63;
	return 0;
}
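
/*
 * Resulting layout for the common xD case (16 MiB and larger): one zone per
 * 16 MiB of media, 1024 physical blocks per zone, 32 sectors of 512 bytes per
 * block, and ftl->max_lba logical blocks exported per zone; the remainder of
 * each zone serves as spare blocks for the free-block FIFO.
 */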

/* Validate the CIS */
static int sm_read_cis(struct sm_ftl *ftl)
{
	struct sm_oob oob;

	if (sm_read_sector(ftl,
		0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
		return -EIO;

	if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
		return -EIO;

	if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
			cis_signature, sizeof(cis_signature))) {
		return 0;
	}

	return -EIO;
}

/* Scan the media for the CIS */
static int sm_find_cis(struct sm_ftl *ftl)
{
	struct sm_oob oob;
	int block, boffset;
	int block_found = 0;
	int cis_found = 0;

	/* Search for first valid block */
	for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {

		if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
			continue;

		if (!sm_block_valid(&oob))
			continue;
		block_found = 1;
		break;
	}

	if (!block_found)
		return -EIO;

	/* Search for first valid sector in this block */
	for (boffset = 0 ; boffset < ftl->block_size;
					boffset += SM_SECTOR_SIZE) {

		if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
			continue;

		if (!sm_sector_valid(&oob))
			continue;
		break;
	}

	if (boffset == ftl->block_size)
		return -EIO;

	ftl->cis_block = block;
	ftl->cis_boffset = boffset;
	ftl->cis_page_offset = 0;

	cis_found = !sm_read_cis(ftl);

	if (!cis_found) {
		ftl->cis_page_offset = SM_SMALL_PAGE;
		cis_found = !sm_read_cis(ftl);
	}

	if (cis_found) {
		dbg("CIS block found at offset %x",
			block * ftl->block_size +
				boffset + ftl->cis_page_offset);
		return 0;
	}
	return -EIO;
}

/* Basic test to determine if underlying mtd device is functional */
static int sm_recheck_media(struct sm_ftl *ftl)
{
	if (sm_read_cis(ftl)) {

		if (!ftl->unstable) {
			sm_printk("media unstable, not allowing writes");
			ftl->unstable = 1;
		}
		return -EIO;
	}
	return 0;
}

/* Initialize a FTL zone */
static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct sm_oob oob;
	uint16_t block;
	int lba;
	int i = 0;
	int len;

	dbg("initializing zone %d", zone_num);

	/* Allocate memory for FTL table */
	zone->lba_to_phys_table = kmalloc_array(ftl->max_lba, 2, GFP_KERNEL);

	if (!zone->lba_to_phys_table)
		return -ENOMEM;
	memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);

	/* Allocate memory for free sectors FIFO */
	if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
		kfree(zone->lba_to_phys_table);
		return -ENOMEM;
	}

	/* Now scan the zone */
	for (block = 0 ; block < ftl->zone_size ; block++) {

		/* Skip blocks till the CIS (including) */
		if (zone_num == 0 && block <= ftl->cis_block)
			continue;

		/* Read the oob of first sector */
		if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob)) {
			kfifo_free(&zone->free_sectors);
			kfree(zone->lba_to_phys_table);
			return -EIO;
		}

		/* Test to see if block is erased. It is enough to test
		 * first sector, because erase happens in one shot
		 */
		if (sm_block_erased(&oob)) {
			kfifo_in(&zone->free_sectors,
				(unsigned char *)&block, 2);
			continue;
		}

		/* If block is marked as bad, skip it */
		/* This assumes we can trust first sector*/
		/* However the way the block valid status is defined, ensures
		 * very low probability of failure here
		 */
		if (!sm_block_valid(&oob)) {
			dbg("PH %04d <-> <marked bad>", block);
			continue;
		}

		lba = sm_read_lba(&oob);

		/* Invalid LBA means that block is damaged. */
		/* We can try to erase it, or mark it as bad, but
		 * lets leave that to recovery application
		 */
		if (lba == -2 || lba >= ftl->max_lba) {
			dbg("PH %04d <-> LBA %04d(bad)", block, lba);
			continue;
		}

		/* If there is no collision,
		 * just put the sector in the FTL table
		 */
		if (zone->lba_to_phys_table[lba] < 0) {
			dbg_verbose("PH %04d <-> LBA %04d", block, lba);
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		sm_printk("collision"
			" of LBA %d between blocks %d and %d in zone %d",
			lba, zone->lba_to_phys_table[lba], block, zone_num);

		/* Test that this block is valid*/
		if (sm_check_block(ftl, zone_num, block))
			continue;

		/* Test now the old block */
		if (sm_check_block(ftl, zone_num,
					zone->lba_to_phys_table[lba])) {
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		/* If both blocks are valid and share same LBA, it means that
		 * they hold different versions of same data. It is not
		 * known which is more recent, thus just erase one of them
		 */
		sm_printk("both blocks are valid, erasing the latter");
		sm_erase_block(ftl, zone_num, block, 1);
	}

	dbg("zone initialized");
	zone->initialized = 1;

	/* No free sectors, means that the zone is heavily damaged, write won't
	 * work, but it can still be (partially) read
	 */
	if (!kfifo_len(&zone->free_sectors)) {
		sm_printk("no free blocks in zone %d", zone_num);
		return 0;
	}

	/* Randomize first block we write to */
	get_random_bytes(&i, 2);
	i %= (kfifo_len(&zone->free_sectors) / 2);

	while (i--) {
		len = kfifo_out(&zone->free_sectors,
					(unsigned char *)&block, 2);
		WARN_ON(len != 2);
		kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
	}
	return 0;
}
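
/*
 * The randomization at the end of sm_init_zone() is a simple form of wear
 * leveling: the free-block FIFO is rotated by a random number of entries
 * (blocks are popped from the head and pushed back onto the tail), so that
 * after every mount writes start from a different spare block rather than
 * always wearing out the same one first.
 */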

/* Get and automatically initialize an FTL mapping for one zone */
static struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
{
	struct ftl_zone *zone;
	int error;

	BUG_ON(zone_num >= ftl->zone_count);
	zone = &ftl->zones[zone_num];

	if (!zone->initialized) {
		error = sm_init_zone(ftl, zone_num);

		if (error)
			return ERR_PTR(error);
	}
	return zone;
}

/* ----------------- cache handling ------------------------------------------*/

/* Initialize the one block cache */
static void sm_cache_init(struct sm_ftl *ftl)
{
	ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
	ftl->cache_clean = 1;
	ftl->cache_zone = -1;
	ftl->cache_block = -1;
	/*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
}
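
/*
 * The "one block cache" buffers exactly one erase block worth of data in RAM
 * (ftl->cache_data). cache_data_invalid_bitmap has one bit per sector of the
 * block; a set bit means that sector has not been placed in the cache yet.
 * Sectors written by the block layer clear their bit via sm_cache_put(), and
 * sm_cache_flush() later fills in the still-invalid sectors from the old
 * on-media block before writing the whole block out.
 */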

/* Put sector in one block cache */
static void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
{
	memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
	clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
	ftl->cache_clean = 0;
}

/* Read a sector from the cache */
static int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
{
	if (test_bit(boffset / SM_SECTOR_SIZE,
		&ftl->cache_data_invalid_bitmap))
			return -1;

	memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
	return 0;
}

/* Write the cache to hardware */
static int sm_cache_flush(struct sm_ftl *ftl)
{
	struct ftl_zone *zone;

	int sector_num;
	uint16_t write_sector;
	int zone_num = ftl->cache_zone;
	int block_num;

	if (ftl->cache_clean)
		return 0;

	if (ftl->unstable)
		return -EIO;

	BUG_ON(zone_num < 0);
	zone = &ftl->zones[zone_num];
	block_num = zone->lba_to_phys_table[ftl->cache_block];

	/* Try to read all unread areas of the cache block*/
	for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
		ftl->block_size / SM_SECTOR_SIZE) {

		if (!sm_read_sector(ftl,
			zone_num, block_num, sector_num * SM_SECTOR_SIZE,
			ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
				clear_bit(sector_num,
					&ftl->cache_data_invalid_bitmap);
	}
restart:

	if (ftl->unstable)
		return -EIO;

	/* If there are no spare blocks, */
	/* we could still continue by erasing/writing the current block,
	 * but for such worn out media it isn't worth the trouble,
	 * and the dangers
	 */
	if (kfifo_out(&zone->free_sectors,
				(unsigned char *)&write_sector, 2) != 2) {
		dbg("no free sectors for write!");
		return -EIO;
	}

	if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
		ftl->cache_block, ftl->cache_data_invalid_bitmap))
			goto restart;

	/* Update the FTL table */
	zone->lba_to_phys_table[ftl->cache_block] = write_sector;

	/* Write successful, so erase and free the old block */
	if (block_num > 0)
		sm_erase_block(ftl, zone_num, block_num, 1);

	sm_cache_init(ftl);
	return 0;
}
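
/*
 * Flushing is done copy-on-write style: the merged block is written to a
 * spare block taken from the zone's free-sector FIFO, the cached LBA is then
 * remapped to that new block in lba_to_phys_table, and only afterwards is the
 * old block erased and returned to the FIFO. A failed write falls back to
 * trying another spare block, so the old data stays intact until a write has
 * fully succeeded.
 */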

/* flush timer, runs a second after last write */
static void sm_cache_flush_timer(struct timer_list *t)
{
	struct sm_ftl *ftl = from_timer(ftl, t, timer);
	queue_work(cache_flush_workqueue, &ftl->flush_work);
}

/* cache flush work, kicked by timer */
static void sm_cache_flush_work(struct work_struct *work)
{
	struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
	mutex_lock(&ftl->mutex);
	sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);
}

/* ---------------- outside interface -------------------------------------- */

/* outside interface: read a sector */
static int sm_read(struct mtd_blktrans_dev *dev,
		   unsigned long sect_no, char *buf)
{
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, in_cache = 0;
	int zone_num, block, boffset;

	sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
	mutex_lock(&ftl->mutex);

	zone = sm_get_zone(ftl, zone_num);
	if (IS_ERR(zone)) {
		error = PTR_ERR(zone);
		goto unlock;
	}

	/* Have to look at cache first */
	if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
		in_cache = 1;
		if (!sm_cache_get(ftl, buf, boffset))
			goto unlock;
	}

	/* Translate the block and return if doesn't exist in the table */
	block = zone->lba_to_phys_table[block];

	if (block == -1) {
		memset(buf, 0xFF, SM_SECTOR_SIZE);
		goto unlock;
	}

	if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
		error = -EIO;
		goto unlock;
	}

	if (in_cache)
		sm_cache_put(ftl, buf, boffset);
unlock:
	mutex_unlock(&ftl->mutex);
	return error;
}

/* outside interface: write a sector */
static int sm_write(struct mtd_blktrans_dev *dev,
		    unsigned long sec_no, char *buf)
{
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, zone_num, block, boffset;

	BUG_ON(ftl->readonly);
	sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);

	/* No need in flush thread running now */
	del_timer(&ftl->timer);
	mutex_lock(&ftl->mutex);

	zone = sm_get_zone(ftl, zone_num);
	if (IS_ERR(zone)) {
		error = PTR_ERR(zone);
		goto unlock;
	}

	/* If entry is not in cache, flush it */
	if (ftl->cache_block != block || ftl->cache_zone != zone_num) {

		error = sm_cache_flush(ftl);
		if (error)
			goto unlock;

		ftl->cache_block = block;
		ftl->cache_zone = zone_num;
	}

	sm_cache_put(ftl, buf, boffset);
unlock:
	mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
	mutex_unlock(&ftl->mutex);
	return error;
}
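
/*
 * Writes are therefore purely write-back: sm_write() only copies the sector
 * into the one-block cache and (re)arms ftl->timer. The actual media update
 * happens either when a write lands in a different block (the
 * sm_cache_flush() call above) or cache_timeout milliseconds after the last
 * write, when the timer queues sm_cache_flush_work() on the "smflush"
 * workqueue created in sm_module_init().
 */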

/* outside interface: flush everything */
static int sm_flush(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;
	int retval;

	mutex_lock(&ftl->mutex);
	retval = sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);
	return retval;
}

/* outside interface: device is released */
static void sm_release(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;

	del_timer_sync(&ftl->timer);
	cancel_work_sync(&ftl->flush_work);
	mutex_lock(&ftl->mutex);
	sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);
}

/* outside interface: get geometry */
static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
	struct sm_ftl *ftl = dev->priv;

	geo->heads = ftl->heads;
	geo->sectors = ftl->sectors;
	geo->cylinders = ftl->cylinders;
	return 0;
}

/* external interface: main initialization function */
static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtd_blktrans_dev *trans;
	struct sm_ftl *ftl;

	/* Allocate & initialize our private structure */
	ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
	if (!ftl)
		goto error1;

	mutex_init(&ftl->mutex);
	timer_setup(&ftl->timer, sm_cache_flush_timer, 0);
	INIT_WORK(&ftl->flush_work, sm_cache_flush_work);

	/* Read media information */
	if (sm_get_media_info(ftl, mtd)) {
		dbg("found unsupported mtd device, aborting");
		goto error2;
	}

	/* Allocate temporary CIS buffer for read retry support */
	ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
	if (!ftl->cis_buffer)
		goto error2;

	/* Allocate zone array, it will be initialized on demand */
	ftl->zones = kcalloc(ftl->zone_count, sizeof(struct ftl_zone),
			     GFP_KERNEL);
	if (!ftl->zones)
		goto error3;

	/* Allocate the cache*/
	ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);

	if (!ftl->cache_data)
		goto error4;

	sm_cache_init(ftl);

	/* Allocate upper layer structure and initialize it */
	trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
	if (!trans)
		goto error5;

	ftl->trans = trans;
	trans->priv = ftl;

	trans->tr = tr;
	trans->mtd = mtd;
	trans->devnum = -1;
	trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
	trans->readonly = ftl->readonly;

	if (sm_find_cis(ftl)) {
		dbg("CIS not found on mtd device, aborting");
		goto error6;
	}

	ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
	if (!ftl->disk_attributes)
		goto error6;
	trans->disk_attributes = ftl->disk_attributes;

	sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
		(int)(mtd->size / (1024 * 1024)), mtd->index);

	dbg("%d zone(s), each consists of %d blocks (+%d spares)",
		ftl->zone_count, ftl->max_lba,
		ftl->zone_size - ftl->max_lba);
	dbg("each block consists of %d bytes",
		ftl->block_size);

	/* Register device*/
	if (add_mtd_blktrans_dev(trans)) {
		dbg("error in mtdblktrans layer");
		goto error6;
	}
	return;
error6:
	kfree(trans);
error5:
	kfree(ftl->cache_data);
error4:
	kfree(ftl->zones);
error3:
	kfree(ftl->cis_buffer);
error2:
	kfree(ftl);
error1:
	return;
}

/* main interface: device {surprise,} removal */
static void sm_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;
	int i;

	del_mtd_blktrans_dev(dev);
	ftl->trans = NULL;

	for (i = 0 ; i < ftl->zone_count ; i++) {

		if (!ftl->zones[i].initialized)
			continue;

		kfree(ftl->zones[i].lba_to_phys_table);
		kfifo_free(&ftl->zones[i].free_sectors);
	}

	sm_delete_sysfs_attributes(ftl);
	kfree(ftl->cis_buffer);
	kfree(ftl->zones);
	kfree(ftl->cache_data);
	kfree(ftl);
}

static struct mtd_blktrans_ops sm_ftl_ops = {
	.name		= "smblk",
	.major		= 0,
	.part_bits	= SM_FTL_PARTN_BITS,
	.blksize	= SM_SECTOR_SIZE,
	.getgeo		= sm_getgeo,

	.add_mtd	= sm_add_mtd,
	.remove_dev	= sm_remove_dev,

	.readsect	= sm_read,
	.writesect	= sm_write,

	.flush		= sm_flush,
	.release	= sm_release,

	.owner		= THIS_MODULE,
};

static __init int sm_module_init(void)
{
	int error = 0;

	cache_flush_workqueue = create_freezable_workqueue("smflush");
	if (!cache_flush_workqueue)
		return -ENOMEM;

	error = register_mtd_blktrans(&sm_ftl_ops);
	if (error)
		destroy_workqueue(cache_flush_workqueue);
	return error;
}

static void __exit sm_module_exit(void)
{
	destroy_workqueue(cache_flush_workqueue);
	deregister_mtd_blktrans(&sm_ftl_ops);
}

module_init(sm_module_init);
module_exit(sm_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");