// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2009 - Maxim Levitsky
 * SmartMedia/xD translation layer
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/hdreg.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sysfs.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/mtd/nand-ecc-sw-hamming.h>
#include "nand/raw/sm_common.h"
#include "sm_ftl.h"

static struct workqueue_struct *cache_flush_workqueue;

static int cache_timeout = 1000;
module_param(cache_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_timeout,
	"Timeout (in ms) for cache flush (1000 ms default)");

static int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");


/* ------------------- sysfs attributes ---------------------------------- */
struct sm_sysfs_attribute {
	struct device_attribute dev_attr;
	char *data;
	int len;
};

static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct sm_sysfs_attribute *sm_attr =
		container_of(attr, struct sm_sysfs_attribute, dev_attr);

	strncpy(buf, sm_attr->data, sm_attr->len);
	return sm_attr->len;
}
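
/*
 * The only attribute exposed here is "vendor": a read-only string copied
 * out of the card's CIS (see SM_CIS_VENDOR_OFFSET below), so userspace can
 * identify the media without having to parse raw sectors itself.
 */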

#define NUM_ATTRIBUTES 1
#define SM_CIS_VENDOR_OFFSET 0x59
static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
{
	struct attribute_group *attr_group;
	struct attribute **attributes;
	struct sm_sysfs_attribute *vendor_attribute;
	char *vendor;

	vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
			  SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL);
	if (!vendor)
		goto error1;

	/* Initialize sysfs attributes */
	vendor_attribute =
		kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
	if (!vendor_attribute)
		goto error2;

	sysfs_attr_init(&vendor_attribute->dev_attr.attr);

	vendor_attribute->data = vendor;
	vendor_attribute->len = strlen(vendor);
	vendor_attribute->dev_attr.attr.name = "vendor";
	vendor_attribute->dev_attr.attr.mode = S_IRUGO;
	vendor_attribute->dev_attr.show = sm_attr_show;

	/* Create array of pointers to the attributes */
	attributes = kcalloc(NUM_ATTRIBUTES + 1, sizeof(struct attribute *),
								GFP_KERNEL);
	if (!attributes)
		goto error3;
	attributes[0] = &vendor_attribute->dev_attr.attr;

	/* Finally create the attribute group */
	attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
	if (!attr_group)
		goto error4;
	attr_group->attrs = attributes;
	return attr_group;
error4:
	kfree(attributes);
error3:
	kfree(vendor_attribute);
error2:
	kfree(vendor);
error1:
	return NULL;
}

static void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
{
	struct attribute **attributes = ftl->disk_attributes->attrs;
	int i;

	for (i = 0; attributes[i]; i++) {

		struct device_attribute *dev_attr = container_of(attributes[i],
						struct device_attribute, attr);

		struct sm_sysfs_attribute *sm_attr =
			container_of(dev_attr,
				struct sm_sysfs_attribute, dev_attr);

		kfree(sm_attr->data);
		kfree(sm_attr);
	}

	kfree(ftl->disk_attributes->attrs);
	kfree(ftl->disk_attributes);
}


/* ----------------------- oob helpers -------------------------------------- */

static int sm_get_lba(uint8_t *lba)
{
	/* check fixed bits */
	if ((lba[0] & 0xF8) != 0x10)
		return -2;

	/* check parity - endianness doesn't matter */
	if (hweight16(*(uint16_t *)lba) & 1)
		return -2;

	return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
}

/*
 * Read LBA associated with block
 * returns -1, if block is erased
 * returns -2 if error happens
 */
static int sm_read_lba(struct sm_oob *oob)
{
	static const uint32_t erased_pattern[4] = {
		0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };

	uint16_t lba_test;
	int lba;

	/* First test for erased block */
	if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
		return -1;

	/* Now check if both copies of the LBA differ too much */
	lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t *)oob->lba_copy2;
	if (lba_test && !is_power_of_2(lba_test))
		return -2;

	lba = sm_get_lba(oob->lba_copy1);

	if (lba == -2)
		lba = sm_get_lba(oob->lba_copy2);

	return lba;
}

static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
{
	uint8_t tmp[2];

	WARN_ON(lba >= 1000);

	tmp[0] = 0x10 | ((lba >> 7) & 0x07);
	tmp[1] = (lba << 1) & 0xFF;

	if (hweight16(*(uint16_t *)tmp) & 0x01)
		tmp[1] |= 1;

	oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
	oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
}
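
/*
 * Worked example of the LBA encoding above (derived from sm_write_lba):
 * LBA 5 -> byte0 = 0x10 | (5 >> 7) = 0x10, byte1 = (5 << 1) & 0xFF = 0x0A.
 * The two bytes then hold 3 set bits (odd), so the parity bit (bit 0 of
 * byte1) gets set, giving 0x10 0x0B. sm_get_lba() checks the 0x10 fixed
 * pattern and even overall parity, then recovers (0x0B >> 1) | (0 << 7) = 5.
 */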


/* Make offset from parts */
static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
{
	WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
	WARN_ON(zone < 0 || zone >= ftl->zone_count);
	WARN_ON(block >= ftl->zone_size);
	WARN_ON(boffset >= ftl->block_size);

	if (block == -1)
		return -1;

	return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
}

/* Breaks offset into parts */
static void sm_break_offset(struct sm_ftl *ftl, loff_t loffset,
			    int *zone, int *block, int *boffset)
{
	u64 offset = loffset;

	*boffset = do_div(offset, ftl->block_size);
	*block = do_div(offset, ftl->max_lba);
	*zone = offset >= ftl->zone_count ? -1 : offset;
}
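
/*
 * Note on the two helpers above: sm_break_offset() splits a *logical* byte
 * offset into (zone, logical block, offset-in-block) using max_lba logical
 * blocks per zone, while sm_mkoffset() builds a *physical* MTD offset with
 * zones spaced SM_MAX_ZONE_SIZE physical blocks apart. A zone therefore has
 * more physical blocks (zone_size) than logical ones (max_lba); the
 * difference is the pool of spare blocks used for remapping.
 */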

/* ---------------------- low level IO ------------------------------------- */

static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
{
	bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
	uint8_t ecc[3];

	ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, ecc, sm_order);
	if (ecc_sw_hamming_correct(buffer, ecc, oob->ecc1, SM_SMALL_PAGE,
				   sm_order) < 0)
		return -EIO;

	buffer += SM_SMALL_PAGE;

	ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, ecc, sm_order);
	if (ecc_sw_hamming_correct(buffer, ecc, oob->ecc2, SM_SMALL_PAGE,
				   sm_order) < 0)
		return -EIO;

	return 0;
}
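
/*
 * sm_correct_sector() treats a SM_SECTOR_SIZE sector as two SM_SMALL_PAGE
 * (256 byte) halves, each protected by its own small Hamming code stored in
 * oob->ecc1 and oob->ecc2, matching the SmartMedia on-media ECC layout.
 */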

/* Reads a sector + oob*/
static int sm_read_sector(struct sm_ftl *ftl,
			  int zone, int block, int boffset,
			  uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_info *mtd = ftl->trans->mtd;
	struct mtd_oob_ops ops;
	struct sm_oob tmp_oob;
	int ret = -EIO;
	int try = 0;

	/* FTL can contain -1 entries that are by default filled with bits */
	if (block == -1) {
		if (buffer)
			memset(buffer, 0xFF, SM_SECTOR_SIZE);
		return 0;
	}

	/* User might not need the oob, but we do for data verification */
	if (!oob)
		oob = &tmp_oob;

	ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;

again:
	if (try++) {
		/* Avoid infinite recursion on CIS reads, sm_recheck_media
		 * won't help anyway
		 */
		if (zone == 0 && block == ftl->cis_block && boffset ==
			ftl->cis_boffset)
			return ret;

		/* Test if media is stable */
		if (try == 3 || sm_recheck_media(ftl))
			return ret;
	}

	/* Unfortunately, oob read will _always_ succeed,
	 * despite card removal.....
	 */
	ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Test for unknown errors */
	if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
		dbg("read of block %d at zone %d, failed due to error (%d)",
			block, zone, ret);
		goto again;
	}

	/* Do a basic test on the oob, to guard against returned garbage */
	if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
		goto again;

	/* This should never happen, unless there is a bug in the mtd driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	if (!buffer)
		return 0;

	/* Test if sector marked as bad */
	if (!sm_sector_valid(oob)) {
		dbg("read of block %d at zone %d, failed because it is marked as bad",
			block, zone);
		goto again;
	}

	/* Test ECC */
	if (mtd_is_eccerr(ret) ||
		(ftl->smallpagenand && sm_correct_sector(buffer, oob))) {

		dbg("read of block %d at zone %d, failed due to ECC error",
			block, zone);
		goto again;
	}

	return 0;
}

/* Writes a sector to media */
static int sm_write_sector(struct sm_ftl *ftl,
			   int zone, int block, int boffset,
			   uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_oob_ops ops;
	struct mtd_info *mtd = ftl->trans->mtd;
	int ret;

	BUG_ON(ftl->readonly);

	if (zone == 0 && (block == ftl->cis_block || block == 0)) {
		dbg("attempted to write the CIS!");
		return -EIO;
	}

	if (ftl->unstable)
		return -EIO;

	ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;

	ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Now we assume that hardware will catch write bitflip errors */

	if (ret) {
		dbg("write to block %d at zone %d, failed with error %d",
			block, zone, ret);

		sm_recheck_media(ftl);
		return ret;
	}

	/* This should never happen, unless there is a bug in the driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	return 0;
}

/* ------------------------ block IO ------------------------------------- */

/* Write a block using data and lba, and invalid sector bitmap */
static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
			  int zone, int block, int lba,
			  unsigned long invalid_bitmap)
{
	bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
	struct sm_oob oob;
	int boffset;
	int retry = 0;

	/* Initialize the oob with requested values */
	memset(&oob, 0xFF, SM_OOB_SIZE);
	sm_write_lba(&oob, lba);
restart:
	if (ftl->unstable)
		return -EIO;

	for (boffset = 0; boffset < ftl->block_size;
				boffset += SM_SECTOR_SIZE) {

		oob.data_status = 0xFF;

		if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {

			sm_printk("sector %d of block at LBA %d of zone %d"
				" couldn't be read, marking it as invalid",
				boffset / SM_SECTOR_SIZE, lba, zone);

			oob.data_status = 0;
		}

		if (ftl->smallpagenand) {
			ecc_sw_hamming_calculate(buf + boffset,
					SM_SMALL_PAGE, oob.ecc1,
					sm_order);

			ecc_sw_hamming_calculate(buf + boffset + SM_SMALL_PAGE,
					SM_SMALL_PAGE, oob.ecc2,
					sm_order);
		}
		if (!sm_write_sector(ftl, zone, block, boffset,
				     buf + boffset, &oob))
			continue;

		if (!retry) {

			/* If write fails, try to erase the block */
			/* This is safe, because we never write in blocks
			 * that contain valuable data.
			 * This is intended to repair block that are marked
			 * as erased, but that isn't fully erased
			 */

			if (sm_erase_block(ftl, zone, block, 0))
				return -EIO;

			retry = 1;
			goto restart;
		} else {
			sm_mark_block_bad(ftl, zone, block);
			return -EIO;
		}
	}
	return 0;
}

/* Mark whole block at offset 'offs' as bad. */
static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
{
	struct sm_oob oob;
	int boffset;

	memset(&oob, 0xFF, SM_OOB_SIZE);
	oob.block_status = 0xF0;

	if (ftl->unstable)
		return;

	if (sm_recheck_media(ftl))
		return;

	sm_printk("marking block %d of zone %d as bad", block, zone);

	/* We aren't checking the return value, because we don't care */
	/* This also fails on fake xD cards, but I guess these won't expose
	 * any bad blocks till they fail completely
	 */
	for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
		sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
}

/*
 * Erase a block within a zone
 * If erase succeeds, it updates free block fifo, otherwise marks block as bad
 */
static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
			  int put_free)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct mtd_info *mtd = ftl->trans->mtd;
	struct erase_info erase;

	erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
	erase.len = ftl->block_size;

	if (ftl->unstable)
		return -EIO;

	BUG_ON(ftl->readonly);

	if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
		sm_printk("attempted to erase the CIS!");
		return -EIO;
	}

	if (mtd_erase(mtd, &erase)) {
		sm_printk("erase of block %d in zone %d failed",
							block, zone_num);
		goto error;
	}

	if (put_free)
		kfifo_in(&zone->free_sectors,
			(const unsigned char *)&block, sizeof(block));

	return 0;
error:
	sm_mark_block_bad(ftl, zone_num, block);
	return -EIO;
}

/* Thoroughly test that block is valid. */
static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
{
	int boffset;
	struct sm_oob oob;
	int lbas[] = { -3, 0, 0, 0 };
	int i = 0;
	int test_lba;

	/* First just check that block doesn't look fishy */
	/* Only blocks that are valid or are sliced in two parts, are
	 * accepted
	 */
	for (boffset = 0; boffset < ftl->block_size;
					boffset += SM_SECTOR_SIZE) {

		/* This shouldn't happen anyway */
		if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
			return -2;

		test_lba = sm_read_lba(&oob);

		if (lbas[i] != test_lba)
			lbas[++i] = test_lba;

		/* If we found three different LBAs, something is fishy */
		if (i == 3)
			return -EIO;
	}

	/* If the block is sliced (partially erased usually) erase it */
	if (i == 2) {
		sm_erase_block(ftl, zone, block, 1);
		return 1;
	}

	return 0;
}

/* ----------------- media scanning --------------------------------- */

/* Geometry table: device size (MiB), cylinders, heads, sectors */
static const struct chs_entry chs_table[] = {
	{ 128,  500,  16, 32 },
	{ 256,  1000, 16, 32 },
	{ 512,  1015, 32, 63 },
	{ 1024, 985,  33, 63 },
	{ 2048, 985,  33, 63 },
};

static const uint8_t cis_signature[] = {
	0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
};
/* Find out media parameters.
 * This ideally has to be based on nand id, but for now device size is enough
 */
static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
{
	int i;
	int size_in_megs = mtd->size / (1024 * 1024);

	ftl->readonly = mtd->type == MTD_ROM;

	/* Manual settings for very old devices */
	ftl->zone_count = 1;
	ftl->smallpagenand = 0;

	switch (size_in_megs) {
	case 1:
		/* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
		ftl->zone_size = 256;
		ftl->max_lba = 250;
		ftl->block_size = 8 * SM_SECTOR_SIZE;
		ftl->smallpagenand = 1;

		break;
	case 2:
		/* 2 MiB flash SmartMedia (256 byte pages)*/
		if (mtd->writesize == SM_SMALL_PAGE) {
			ftl->zone_size = 512;
			ftl->max_lba = 500;
			ftl->block_size = 8 * SM_SECTOR_SIZE;
			ftl->smallpagenand = 1;
		/* 2 MiB rom SmartMedia */
		} else {

			if (!ftl->readonly)
				return -ENODEV;

			ftl->zone_size = 256;
			ftl->max_lba = 250;
			ftl->block_size = 16 * SM_SECTOR_SIZE;
		}
		break;
	case 4:
		/* 4 MiB flash/rom SmartMedia device */
		ftl->zone_size = 512;
		ftl->max_lba = 500;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
		break;
	case 8:
		/* 8 MiB flash/rom SmartMedia device */
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
		break;
	}

	/* Minimum xD size is 16MiB. Also, all xD cards have standard zone
	 * sizes. SmartMedia cards exist up to 128 MiB and have same layout
	 */
	if (size_in_megs >= 16) {
		ftl->zone_count = size_in_megs / 16;
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 32 * SM_SECTOR_SIZE;
	}

	/* Test for proper write, erase and oob sizes */
	if (mtd->erasesize > ftl->block_size)
		return -ENODEV;

	if (mtd->writesize > SM_SECTOR_SIZE)
		return -ENODEV;

	if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
		return -ENODEV;

	if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
		return -ENODEV;

	/* We use OOB */
	if (!mtd_has_oob(mtd))
		return -ENODEV;

	/* Find geometry information */
	for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
		if (chs_table[i].size == size_in_megs) {
			ftl->cylinders = chs_table[i].cyl;
			ftl->heads = chs_table[i].head;
			ftl->sectors = chs_table[i].sec;
			return 0;
		}
	}

	sm_printk("media has unknown size : %dMiB", size_in_megs);
	ftl->cylinders = 985;
	ftl->heads = 33;
	ftl->sectors = 63;
	return 0;
}
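
/*
 * Example of the resulting layout (values from the code above): a 16 MiB xD
 * card gets zone_count = 1, zone_size = 1024 physical blocks, max_lba = 1000
 * logical blocks and block_size = 32 * SM_SECTOR_SIZE = 16 KiB, i.e. roughly
 * 1000 * 16 KiB of exported capacity per zone plus 24 spare blocks (minus
 * whatever is reserved for the CIS in zone 0).
 */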

/* Validate the CIS */
static int sm_read_cis(struct sm_ftl *ftl)
{
	struct sm_oob oob;

	if (sm_read_sector(ftl,
		0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
		return -EIO;

	if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
		return -EIO;

	if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
			cis_signature, sizeof(cis_signature))) {
		return 0;
	}

	return -EIO;
}

/* Scan the media for the CIS */
static int sm_find_cis(struct sm_ftl *ftl)
{
	struct sm_oob oob;
	int block, boffset;
	int block_found = 0;
	int cis_found = 0;

	/* Search for first valid block */
	for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {

		if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
			continue;

		if (!sm_block_valid(&oob))
			continue;
		block_found = 1;
		break;
	}

	if (!block_found)
		return -EIO;

	/* Search for first valid sector in this block */
	for (boffset = 0 ; boffset < ftl->block_size;
						boffset += SM_SECTOR_SIZE) {

		if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
			continue;

		if (!sm_sector_valid(&oob))
			continue;
		break;
	}

	if (boffset == ftl->block_size)
		return -EIO;

	ftl->cis_block = block;
	ftl->cis_boffset = boffset;
	ftl->cis_page_offset = 0;

	cis_found = !sm_read_cis(ftl);

	if (!cis_found) {
		ftl->cis_page_offset = SM_SMALL_PAGE;
		cis_found = !sm_read_cis(ftl);
	}

	if (cis_found) {
		dbg("CIS block found at offset %x",
			block * ftl->block_size +
				boffset + ftl->cis_page_offset);
		return 0;
	}
	return -EIO;
}
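
/*
 * The CIS (Card Information Structure) located above also doubles as a
 * cheap health probe: sm_recheck_media() below simply re-reads it, and if
 * that fails the media is flagged unstable and further writes are refused.
 */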

/* Basic test to determine if underlying mtd device is functional */
static int sm_recheck_media(struct sm_ftl *ftl)
{
	if (sm_read_cis(ftl)) {

		if (!ftl->unstable) {
			sm_printk("media unstable, not allowing writes");
			ftl->unstable = 1;
		}
		return -EIO;
	}
	return 0;
}
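
/*
 * Zone initialization below is done lazily, on first access from the block
 * layer. The scan builds lba_to_phys_table from the LBA stored in each
 * block's OOB, queues erased blocks into the free_sectors FIFO, and then
 * rotates that FIFO by a random amount so that writes after every mount
 * start from a different spare block, which acts as a crude form of wear
 * leveling.
 */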

/* Initialize a FTL zone */
static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct sm_oob oob;
	uint16_t block;
	int lba;
	int i = 0;
	int len;

	dbg("initializing zone %d", zone_num);

	/* Allocate memory for FTL table */
	zone->lba_to_phys_table = kmalloc_array(ftl->max_lba, 2, GFP_KERNEL);

	if (!zone->lba_to_phys_table)
		return -ENOMEM;
	memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);


	/* Allocate memory for free sectors FIFO */
	if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
		kfree(zone->lba_to_phys_table);
		return -ENOMEM;
	}

	/* Now scan the zone */
	for (block = 0 ; block < ftl->zone_size ; block++) {

		/* Skip blocks till the CIS (including) */
		if (zone_num == 0 && block <= ftl->cis_block)
			continue;

		/* Read the oob of first sector */
		if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob)) {
			kfifo_free(&zone->free_sectors);
			kfree(zone->lba_to_phys_table);
			return -EIO;
		}

		/* Test to see if block is erased. It is enough to test
		 * first sector, because erase happens in one shot
		 */
		if (sm_block_erased(&oob)) {
			kfifo_in(&zone->free_sectors,
				(unsigned char *)&block, 2);
			continue;
		}

		/* If block is marked as bad, skip it */
		/* This assumes we can trust first sector*/
		/* However the way the block valid status is defined, ensures
		 * very low probability of failure here
		 */
		if (!sm_block_valid(&oob)) {
			dbg("PH %04d <-> <marked bad>", block);
			continue;
		}

		lba = sm_read_lba(&oob);

		/* Invalid LBA means that block is damaged. */
		/* We can try to erase it, or mark it as bad, but
		 * lets leave that to recovery application
		 */
		if (lba == -2 || lba >= ftl->max_lba) {
			dbg("PH %04d <-> LBA %04d(bad)", block, lba);
			continue;
		}

		/* If there is no collision,
		 * just put the sector in the FTL table
		 */
		if (zone->lba_to_phys_table[lba] < 0) {
			dbg_verbose("PH %04d <-> LBA %04d", block, lba);
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		sm_printk("collision"
			" of LBA %d between blocks %d and %d in zone %d",
			lba, zone->lba_to_phys_table[lba], block, zone_num);

		/* Test that this block is valid*/
		if (sm_check_block(ftl, zone_num, block))
			continue;

		/* Test now the old block */
		if (sm_check_block(ftl, zone_num,
					zone->lba_to_phys_table[lba])) {
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		/* If both blocks are valid and share same LBA, it means that
		 * they hold different versions of same data. It is not
		 * known which is more recent, thus just erase one of them
		 */
		sm_printk("both blocks are valid, erasing the later");
		sm_erase_block(ftl, zone_num, block, 1);
	}

	dbg("zone initialized");
	zone->initialized = 1;

	/* No free sectors, means that the zone is heavily damaged, write won't
	 * work, but it can still be (partially) read
	 */
	if (!kfifo_len(&zone->free_sectors)) {
		sm_printk("no free blocks in zone %d", zone_num);
		return 0;
	}

	/* Randomize first block we write to */
	get_random_bytes(&i, 2);
	i %= (kfifo_len(&zone->free_sectors) / 2);

	while (i--) {
		len = kfifo_out(&zone->free_sectors,
					(unsigned char *)&block, 2);
		WARN_ON(len != 2);
		kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
	}
	return 0;
}

/* Get and automatically initialize an FTL mapping for one zone */
static struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
{
	struct ftl_zone *zone;
	int error;

	BUG_ON(zone_num >= ftl->zone_count);
	zone = &ftl->zones[zone_num];

	if (!zone->initialized) {
		error = sm_init_zone(ftl, zone_num);

		if (error)
			return ERR_PTR(error);
	}
	return zone;
}


/* ----------------- cache handling ------------------------------------------*/
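
/*
 * The driver keeps a single block-sized write-back cache (ftl->cache_data).
 * cache_data_invalid_bitmap tracks which sectors of the cached block have
 * not been written by the upper layer yet; on flush those are read back
 * from the old physical block, the whole block is written to a fresh spare
 * block, the FTL table is updated and the old block is erased and recycled.
 */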

/* Initialize the one block cache */
static void sm_cache_init(struct sm_ftl *ftl)
{
	ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
	ftl->cache_clean = 1;
	ftl->cache_zone = -1;
	ftl->cache_block = -1;
	/*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
}

/* Put sector in one block cache */
static void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
{
	memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
	clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
	ftl->cache_clean = 0;
}

/* Read a sector from the cache */
static int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
{
	if (test_bit(boffset / SM_SECTOR_SIZE,
		&ftl->cache_data_invalid_bitmap))
			return -1;

	memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
	return 0;
}

/* Write the cache to hardware */
static int sm_cache_flush(struct sm_ftl *ftl)
{
	struct ftl_zone *zone;

	int sector_num;
	uint16_t write_sector;
	int zone_num = ftl->cache_zone;
	int block_num;

	if (ftl->cache_clean)
		return 0;

	if (ftl->unstable)
		return -EIO;

	BUG_ON(zone_num < 0);
	zone = &ftl->zones[zone_num];
	block_num = zone->lba_to_phys_table[ftl->cache_block];


	/* Try to read all unread areas of the cache block*/
	for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
		ftl->block_size / SM_SECTOR_SIZE) {

		if (!sm_read_sector(ftl,
			zone_num, block_num, sector_num * SM_SECTOR_SIZE,
			ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
				clear_bit(sector_num,
					&ftl->cache_data_invalid_bitmap);
	}
restart:

	if (ftl->unstable)
		return -EIO;

	/* If there are no spare blocks, */
	/* we could still continue by erasing/writing the current block,
	 * but for such worn out media it isn't worth the trouble.
	 */
	if (kfifo_out(&zone->free_sectors,
				(unsigned char *)&write_sector, 2) != 2) {
		dbg("no free sectors for write!");
		return -EIO;
	}

	if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
		ftl->cache_block, ftl->cache_data_invalid_bitmap))
			goto restart;

	/* Update the FTL table */
	zone->lba_to_phys_table[ftl->cache_block] = write_sector;

	/* Write successful, so erase and free the old block */
	if (block_num > 0)
		sm_erase_block(ftl, zone_num, block_num, 1);

	sm_cache_init(ftl);
	return 0;
}


/* flush timer, runs a second after last write */
static void sm_cache_flush_timer(struct timer_list *t)
{
	struct sm_ftl *ftl = from_timer(ftl, t, timer);
	queue_work(cache_flush_workqueue, &ftl->flush_work);
}

/* cache flush work, kicked by timer */
static void sm_cache_flush_work(struct work_struct *work)
{
	struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
	mutex_lock(&ftl->mutex);
	sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);
}
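
/*
 * Flush pipeline: sm_write() (re)arms ftl->timer; when it expires, the
 * timer queues flush_work on the freezable "smflush" workqueue created in
 * sm_module_init(), and the work item takes ftl->mutex and flushes the
 * cache. sm_flush() and sm_release() perform the same flush synchronously.
 */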

/* ---------------- outside interface -------------------------------------- */

/* outside interface: read a sector */
static int sm_read(struct mtd_blktrans_dev *dev,
		   unsigned long sect_no, char *buf)
{
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, in_cache = 0;
	int zone_num, block, boffset;

	sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
	mutex_lock(&ftl->mutex);


	zone = sm_get_zone(ftl, zone_num);
	if (IS_ERR(zone)) {
		error = PTR_ERR(zone);
		goto unlock;
	}

	/* Have to look at cache first */
	if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
		in_cache = 1;
		if (!sm_cache_get(ftl, buf, boffset))
			goto unlock;
	}

	/* Translate the block and return if doesn't exist in the table */
	block = zone->lba_to_phys_table[block];

	if (block == -1) {
		memset(buf, 0xFF, SM_SECTOR_SIZE);
		goto unlock;
	}

	if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
		error = -EIO;
		goto unlock;
	}

	if (in_cache)
		sm_cache_put(ftl, buf, boffset);
unlock:
	mutex_unlock(&ftl->mutex);
	return error;
}

/* outside interface: write a sector */
static int sm_write(struct mtd_blktrans_dev *dev,
				unsigned long sec_no, char *buf)
{
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, zone_num, block, boffset;

	BUG_ON(ftl->readonly);
	sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);

	/* No need in flush thread running now */
	del_timer(&ftl->timer);
	mutex_lock(&ftl->mutex);

	zone = sm_get_zone(ftl, zone_num);
	if (IS_ERR(zone)) {
		error = PTR_ERR(zone);
		goto unlock;
	}

	/* If entry is not in cache, flush it */
	if (ftl->cache_block != block || ftl->cache_zone != zone_num) {

		error = sm_cache_flush(ftl);
		if (error)
			goto unlock;

		ftl->cache_block = block;
		ftl->cache_zone = zone_num;
	}

	sm_cache_put(ftl, buf, boffset);
unlock:
	mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
	mutex_unlock(&ftl->mutex);
	return error;
}
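
/*
 * Note that sm_write() only copies the sector into the block cache; the
 * actual media update happens when the cache is flushed, either by the
 * timer (cache_timeout ms after the last write, 1000 ms by default) or by
 * an explicit flush/release from the block layer below.
 */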

/* outside interface: flush everything */
static int sm_flush(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;
	int retval;

	mutex_lock(&ftl->mutex);
	retval = sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);
	return retval;
}

/* outside interface: device is released */
static void sm_release(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;

	mutex_lock(&ftl->mutex);
	del_timer_sync(&ftl->timer);
	cancel_work_sync(&ftl->flush_work);
	sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);
}

/* outside interface: get geometry */
static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
	struct sm_ftl *ftl = dev->priv;

	geo->heads = ftl->heads;
	geo->sectors = ftl->sectors;
	geo->cylinders = ftl->cylinders;
	return 0;
}

/* external interface: main initialization function */
static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtd_blktrans_dev *trans;
	struct sm_ftl *ftl;

	/* Allocate & initialize our private structure */
	ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
	if (!ftl)
		goto error1;


	mutex_init(&ftl->mutex);
	timer_setup(&ftl->timer, sm_cache_flush_timer, 0);
	INIT_WORK(&ftl->flush_work, sm_cache_flush_work);

	/* Read media information */
	if (sm_get_media_info(ftl, mtd)) {
		dbg("found unsupported mtd device, aborting");
		goto error2;
	}


	/* Allocate temporary CIS buffer for read retry support */
	ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
	if (!ftl->cis_buffer)
		goto error2;

	/* Allocate zone array, it will be initialized on demand */
	ftl->zones = kcalloc(ftl->zone_count, sizeof(struct ftl_zone),
								GFP_KERNEL);
	if (!ftl->zones)
		goto error3;

	/* Allocate the cache*/
	ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);

	if (!ftl->cache_data)
		goto error4;

	sm_cache_init(ftl);


	/* Allocate upper layer structure and initialize it */
	trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
	if (!trans)
		goto error5;

	ftl->trans = trans;
	trans->priv = ftl;

	trans->tr = tr;
	trans->mtd = mtd;
	trans->devnum = -1;
	trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
	trans->readonly = ftl->readonly;

	if (sm_find_cis(ftl)) {
		dbg("CIS not found on mtd device, aborting");
		goto error6;
	}

	ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
	if (!ftl->disk_attributes)
		goto error6;
	trans->disk_attributes = ftl->disk_attributes;

	sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
		(int)(mtd->size / (1024 * 1024)), mtd->index);

	dbg("%d zone(s), each consists of %d blocks (+%d spares)",
		ftl->zone_count, ftl->max_lba,
		ftl->zone_size - ftl->max_lba);
	dbg("each block consists of %d bytes",
		ftl->block_size);


	/* Register device*/
	if (add_mtd_blktrans_dev(trans)) {
		dbg("error in mtdblktrans layer");
		goto error6;
	}
	return;
error6:
	kfree(trans);
error5:
	kfree(ftl->cache_data);
error4:
	kfree(ftl->zones);
error3:
	kfree(ftl->cis_buffer);
error2:
	kfree(ftl);
error1:
	return;
}

/* main interface: device {surprise,} removal */
static void sm_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;
	int i;

	del_mtd_blktrans_dev(dev);

	for (i = 0 ; i < ftl->zone_count ; i++) {

		if (!ftl->zones[i].initialized)
			continue;

		kfree(ftl->zones[i].lba_to_phys_table);
		kfifo_free(&ftl->zones[i].free_sectors);
	}

	sm_delete_sysfs_attributes(ftl);
	kfree(ftl->cis_buffer);
	kfree(ftl->zones);
	kfree(ftl->cache_data);
	kfree(ftl);
}

static struct mtd_blktrans_ops sm_ftl_ops = {
	.part_bits	= SM_FTL_PARTN_BITS,
	.blksize	= SM_SECTOR_SIZE,
	.getgeo		= sm_getgeo,

	.add_mtd	= sm_add_mtd,
	.remove_dev	= sm_remove_dev,

	.readsect	= sm_read,
	.writesect	= sm_write,

	.flush		= sm_flush,
	.release	= sm_release,

	.owner		= THIS_MODULE,
};
int sm_module_init(void)
1264 cache_flush_workqueue
= create_freezable_workqueue("smflush");
1265 if (!cache_flush_workqueue
)
1268 error
= register_mtd_blktrans(&sm_ftl_ops
);
1270 destroy_workqueue(cache_flush_workqueue
);
1275 static void __exit
sm_module_exit(void)
1277 destroy_workqueue(cache_flush_workqueue
);
1278 deregister_mtd_blktrans(&sm_ftl_ops
);
1281 module_init(sm_module_init
);
1282 module_exit(sm_module_exit
);
1284 MODULE_LICENSE("GPL");
1285 MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
1286 MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");