drivers/mtd/rfd_ftl.c
/*
 * rfd_ftl.c -- resident flash disk (flash translation layer)
 *
 * Copyright © 2005 Sean Young <sean@mess.org>
 *
 * This type of flash translation layer (FTL) is used by the Embedded BIOS
 * by General Software. It is known as the Resident Flash Disk (RFD), see:
 *
 *      http://www.gensw.com/pages/prod/bios/rfd.htm
 *
 * based on ftl.c
 */
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

#include <asm/types.h>
static int block_size = 0;
module_param(block_size, int, 0);
MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
#define PREFIX "rfd_ftl: "

/* This major has been assigned by device@lanana.org */
#ifndef RFD_FTL_MAJOR
#define RFD_FTL_MAJOR 256
#endif

/* Maximum number of partitions in an FTL region */
#define PART_BITS 4

/* An erase unit should start with this value */
#define RFD_MAGIC 0x9193

/* the second value is 0xffff or 0xffc8; function unknown */

/* the third value is always 0xffff, ignored */

/* next is an array of mapping for each corresponding sector */
#define HEADER_MAP_OFFSET 3
#define SECTOR_DELETED 0x0000
#define SECTOR_ZERO 0xfffe
#define SECTOR_FREE 0xffff

#define SECTOR_SIZE 512

#define SECTORS_PER_TRACK 63
struct block {
        enum {
                BLOCK_OK,
                BLOCK_ERASING,
                BLOCK_ERASED,
                BLOCK_UNUSED,
                BLOCK_FAILED
        } state;
        int free_sectors;
        int used_sectors;
        int erases;
        u_long offset;
};
struct partition {
        struct mtd_blktrans_dev mbd;

        u_int block_size;               /* size of erase unit */
        u_int total_blocks;             /* number of erase units */
        u_int header_sectors_per_block; /* header sectors in erase unit */
        u_int data_sectors_per_block;   /* data sectors in erase unit */
        u_int sector_count;             /* sectors in translated disk */
        u_int header_size;              /* bytes in header sector */
        int reserved_block;             /* block next up for reclaim */
        int current_block;              /* block to write to */
        u16 *header_cache;              /* cached header */

        int is_reclaiming;
        int cylinders;
        int errors;
        u_long *sector_map;
        struct block *blocks;
};
static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
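/*
 * Parse the cached header of erase unit block_no: count free and used
 * sectors, fill in part->sector_map for each live entry, and remember a
 * fully free unit as the reserved (reclaim spare) block.
 */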
static int build_block_map(struct partition *part, int block_no)
{
        struct block *block = &part->blocks[block_no];
        int i;

        block->offset = part->block_size * block_no;

        if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
                block->state = BLOCK_UNUSED;
                return -ENOENT;
        }

        block->state = BLOCK_OK;

        for (i=0; i<part->data_sectors_per_block; i++) {
                u16 entry;

                entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);

                if (entry == SECTOR_DELETED)
                        continue;

                if (entry == SECTOR_FREE) {
                        block->free_sectors++;
                        continue;
                }

                if (entry == SECTOR_ZERO)
                        entry = 0;

                if (entry >= part->sector_count) {
                        printk(KERN_WARNING PREFIX
                                "'%s': unit #%d: entry %d corrupt, "
                                "sector %d out of range\n",
                                part->mbd.mtd->name, block_no, i, entry);
                        continue;
                }

                if (part->sector_map[entry] != -1) {
                        printk(KERN_WARNING PREFIX
                                "'%s': more than one entry for sector %d\n",
                                part->mbd.mtd->name, entry);
                        part->errors = 1;
                        continue;
                }

                part->sector_map[entry] = block->offset +
                        (i + part->header_sectors_per_block) * SECTOR_SIZE;

                block->used_sectors++;
        }

        if (block->free_sectors == part->data_sectors_per_block)
                part->reserved_block = block_no;

        return 0;
}
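/*
 * Size the partition from the MTD geometry, allocate the header cache,
 * block array and sector map, then read every erase-unit header and build
 * the initial block map. Returns -ENOENT if no RFD magic is found.
 */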
static int scan_header(struct partition *part)
{
        int sectors_per_block;
        int i, rc = -ENOMEM;
        int blocks_found;
        size_t retlen;

        sectors_per_block = part->block_size / SECTOR_SIZE;
        part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;

        if (part->total_blocks < 2)
                return -ENOENT;

        /* each erase block has a three-word (u16) header, followed by the map */
        part->header_sectors_per_block =
                        ((HEADER_MAP_OFFSET + sectors_per_block) *
                        sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;

        part->data_sectors_per_block = sectors_per_block -
                        part->header_sectors_per_block;

        part->header_size = (HEADER_MAP_OFFSET +
                        part->data_sectors_per_block) * sizeof(u16);

        part->cylinders = (part->data_sectors_per_block *
                        (part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;

        part->sector_count = part->cylinders * SECTORS_PER_TRACK;

        part->current_block = -1;
        part->reserved_block = -1;
        part->is_reclaiming = 0;

        part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
        if (!part->header_cache)
                goto err;

        part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
                        GFP_KERNEL);
        if (!part->blocks)
                goto err;

        part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
        if (!part->sector_map) {
                printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
                        "sector map", part->mbd.mtd->name);
                goto err;
        }

        for (i=0; i<part->sector_count; i++)
                part->sector_map[i] = -1;

        for (i=0, blocks_found=0; i<part->total_blocks; i++) {
                rc = part->mbd.mtd->read(part->mbd.mtd,
                                i * part->block_size, part->header_size,
                                &retlen, (u_char*)part->header_cache);

                if (!rc && retlen != part->header_size)
                        rc = -EIO;

                if (rc)
                        goto err;

                if (!build_block_map(part, i))
                        blocks_found++;
        }

        if (blocks_found == 0) {
                printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
                                part->mbd.mtd->name);
                rc = -ENOENT;
                goto err;
        }

        if (part->reserved_block == -1) {
                printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
                                part->mbd.mtd->name);

                part->errors = 1;
        }

        return 0;

err:
        vfree(part->sector_map);
        kfree(part->header_cache);
        kfree(part->blocks);

        return rc;
}
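/*
 * Block-layer read: look the logical sector up in sector_map and read it
 * from flash; unmapped sectors read back as zeroes.
 */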
static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
        struct partition *part = (struct partition*)dev;
        u_long addr;
        size_t retlen;
        int rc;

        if (sector >= part->sector_count)
                return -EIO;

        addr = part->sector_map[sector];
        if (addr != -1) {
                rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
                                &retlen, (u_char*)buf);
                if (!rc && retlen != SECTOR_SIZE)
                        rc = -EIO;

                if (rc) {
                        printk(KERN_WARNING PREFIX "error reading '%s' at "
                                "0x%lx\n", part->mbd.mtd->name, addr);
                        return rc;
                }
        } else
                memset(buf, 0, SECTOR_SIZE);

        return 0;
}
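/*
 * Completion callback for erase_block(): on success, re-stamp the unit with
 * RFD_MAGIC and mark it usable again; on failure, mark it BLOCK_FAILED.
 */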
static void erase_callback(struct erase_info *erase)
{
        struct partition *part;
        u16 magic;
        int i, rc;
        size_t retlen;

        part = (struct partition*)erase->priv;

        i = (u32)erase->addr / part->block_size;
        if (i >= part->total_blocks || part->blocks[i].offset != erase->addr ||
            erase->addr > UINT_MAX) {
                printk(KERN_ERR PREFIX "erase callback for unknown offset %llx "
                        "on '%s'\n", (unsigned long long)erase->addr,
                        part->mbd.mtd->name);
                return;
        }

        if (erase->state != MTD_ERASE_DONE) {
                printk(KERN_WARNING PREFIX "erase failed at 0x%llx on '%s', "
                        "state %d\n", (unsigned long long)erase->addr,
                        part->mbd.mtd->name, erase->state);

                part->blocks[i].state = BLOCK_FAILED;
                part->blocks[i].free_sectors = 0;
                part->blocks[i].used_sectors = 0;

                kfree(erase);

                return;
        }

        magic = cpu_to_le16(RFD_MAGIC);

        part->blocks[i].state = BLOCK_ERASED;
        part->blocks[i].free_sectors = part->data_sectors_per_block;
        part->blocks[i].used_sectors = 0;
        part->blocks[i].erases++;

        rc = part->mbd.mtd->write(part->mbd.mtd,
                part->blocks[i].offset, sizeof(magic), &retlen,
                (u_char*)&magic);

        if (!rc && retlen != sizeof(magic))
                rc = -EIO;

        if (rc) {
                printk(KERN_ERR PREFIX "'%s': unable to write RFD "
                        "header at 0x%lx\n",
                        part->mbd.mtd->name,
                        part->blocks[i].offset);
                part->blocks[i].state = BLOCK_FAILED;
        }
        else
                part->blocks[i].state = BLOCK_OK;

        kfree(erase);
}
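/*
 * Queue an asynchronous erase of one unit; erase_callback() finishes the
 * bookkeeping when the MTD layer reports completion.
 */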
static int erase_block(struct partition *part, int block)
{
        struct erase_info *erase;
        int rc = -ENOMEM;

        erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
        if (!erase)
                goto err;

        erase->mtd = part->mbd.mtd;
        erase->callback = erase_callback;
        erase->addr = part->blocks[block].offset;
        erase->len = part->block_size;
        erase->priv = (u_long)part;

        part->blocks[block].state = BLOCK_ERASING;
        part->blocks[block].free_sectors = 0;

        rc = part->mbd.mtd->erase(part->mbd.mtd, erase);

        if (rc) {
                printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
                        "failed\n", (unsigned long long)erase->addr,
                        (unsigned long long)erase->len, part->mbd.mtd->name);
                kfree(erase);
        }

err:
        return rc;
}
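/*
 * Copy every live sector out of the unit being reclaimed by rewriting it
 * through rfd_ftl_writesect(); the sector being superseded (*old_sector)
 * is skipped rather than copied.
 */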
static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
{
        void *sector_data;
        u16 *map;
        size_t retlen;
        int i, rc = -ENOMEM;

        part->is_reclaiming = 1;

        sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
        if (!sector_data)
                goto err3;

        map = kmalloc(part->header_size, GFP_KERNEL);
        if (!map)
                goto err2;

        rc = part->mbd.mtd->read(part->mbd.mtd,
                part->blocks[block_no].offset, part->header_size,
                &retlen, (u_char*)map);

        if (!rc && retlen != part->header_size)
                rc = -EIO;

        if (rc) {
                printk(KERN_ERR PREFIX "error reading '%s' at "
                        "0x%lx\n", part->mbd.mtd->name,
                        part->blocks[block_no].offset);

                goto err;
        }

        for (i=0; i<part->data_sectors_per_block; i++) {
                u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
                u_long addr;

                if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
                        continue;

                if (entry == SECTOR_ZERO)
                        entry = 0;

                /* already warned about and ignored in build_block_map() */
                if (entry >= part->sector_count)
                        continue;

                addr = part->blocks[block_no].offset +
                        (i + part->header_sectors_per_block) * SECTOR_SIZE;

                if (*old_sector == addr) {
                        *old_sector = -1;
                        if (!part->blocks[block_no].used_sectors--) {
                                rc = erase_block(part, block_no);
                                break;
                        }
                        continue;
                }

                rc = part->mbd.mtd->read(part->mbd.mtd, addr,
                        SECTOR_SIZE, &retlen, sector_data);

                if (!rc && retlen != SECTOR_SIZE)
                        rc = -EIO;

                if (rc) {
                        printk(KERN_ERR PREFIX "'%s': Unable to "
                                "read sector for relocation\n",
                                part->mbd.mtd->name);

                        goto err;
                }

                rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
                                entry, sector_data);

                if (rc)
                        goto err;
        }

err:
        kfree(map);
err2:
        kfree(sector_data);
err3:
        part->is_reclaiming = 0;

        return rc;
}
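/*
 * Pick the cheapest unit to reclaim (fewest used sectors plus erase count,
 * skipping the reserved block), then either relocate its live data or erase
 * it directly if it holds none.
 */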
static int reclaim_block(struct partition *part, u_long *old_sector)
{
        int block, best_block, score, old_sector_block;
        int rc;

        /* we have a race if sync doesn't exist */
        if (part->mbd.mtd->sync)
                part->mbd.mtd->sync(part->mbd.mtd);

        score = 0x7fffffff; /* MAX_INT */
        best_block = -1;
        if (*old_sector != -1)
                old_sector_block = *old_sector / part->block_size;
        else
                old_sector_block = -1;

        for (block=0; block<part->total_blocks; block++) {
                int this_score;

                if (block == part->reserved_block)
                        continue;

                /*
                 * Postpone reclaiming if there is a free sector as
                 * more removed sectors is more efficient (have to move
                 * less).
                 */
                if (part->blocks[block].free_sectors)
                        return 0;

                this_score = part->blocks[block].used_sectors;

                if (block == old_sector_block)
                        this_score--;
                else {
                        /* no point in moving a full block */
                        if (part->blocks[block].used_sectors ==
                                        part->data_sectors_per_block)
                                continue;
                }

                this_score += part->blocks[block].erases;

                if (this_score < score) {
                        best_block = block;
                        score = this_score;
                }
        }

        if (best_block == -1)
                return -ENOSPC;

        part->current_block = -1;
        part->reserved_block = best_block;

        pr_debug("reclaim_block: reclaiming block #%d with %d used "
                 "%d free sectors\n", best_block,
                 part->blocks[best_block].used_sectors,
                 part->blocks[best_block].free_sectors);

        if (part->blocks[best_block].used_sectors)
                rc = move_block_contents(part, best_block, old_sector);
        else
                rc = erase_block(part, best_block);

        return rc;
}
/*
 * IMPROVE: It would be best to choose the block with the most deleted sectors,
 * because if we fill that one up first it'll have the most chance of having
 * the least live sectors at reclaim.
 */
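/*
 * Round-robin search for a unit with free sectors, starting at the current
 * write block (or a pseudo-random one); UNUSED units found on the way are
 * queued for erase.
 */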
static int find_free_block(struct partition *part)
{
        int block, stop;

        block = part->current_block == -1 ?
                        jiffies % part->total_blocks : part->current_block;
        stop = block;

        do {
                if (part->blocks[block].free_sectors &&
                                block != part->reserved_block)
                        return block;

                if (part->blocks[block].state == BLOCK_UNUSED)
                        erase_block(part, block);

                if (++block >= part->total_blocks)
                        block = 0;

        } while (block != stop);

        return -1;
}
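/*
 * Find a unit with free sectors, reclaiming one if necessary, and load its
 * header into header_cache so new sectors can be appended to it.
 */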
static int find_writable_block(struct partition *part, u_long *old_sector)
{
        int rc, block;
        size_t retlen;

        block = find_free_block(part);

        if (block == -1) {
                if (!part->is_reclaiming) {
                        rc = reclaim_block(part, old_sector);
                        if (rc)
                                goto err;

                        block = find_free_block(part);
                }

                if (block == -1) {
                        rc = -ENOSPC;
                        goto err;
                }
        }

        rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
                part->header_size, &retlen, (u_char*)part->header_cache);

        if (!rc && retlen != part->header_size)
                rc = -EIO;

        if (rc) {
                printk(KERN_ERR PREFIX "'%s': unable to read header at "
                        "0x%lx\n", part->mbd.mtd->name,
                        part->blocks[block].offset);
                goto err;
        }

        part->current_block = block;

err:
        return rc;
}
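/*
 * Mark the map entry for old_addr as SECTOR_DELETED on flash (and in the
 * header cache); erase the unit once it has neither used nor free sectors.
 */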
static int mark_sector_deleted(struct partition *part, u_long old_addr)
{
        int block, offset, rc;
        u_long addr;
        size_t retlen;
        u16 del = cpu_to_le16(SECTOR_DELETED);

        block = old_addr / part->block_size;
        offset = (old_addr % part->block_size) / SECTOR_SIZE -
                part->header_sectors_per_block;

        addr = part->blocks[block].offset +
                        (HEADER_MAP_OFFSET + offset) * sizeof(u16);
        rc = part->mbd.mtd->write(part->mbd.mtd, addr,
                sizeof(del), &retlen, (u_char*)&del);

        if (!rc && retlen != sizeof(del))
                rc = -EIO;

        if (rc) {
                printk(KERN_ERR PREFIX "error writing '%s' at "
                        "0x%lx\n", part->mbd.mtd->name, addr);
                goto err;
        }

        if (block == part->current_block)
                part->header_cache[offset + HEADER_MAP_OFFSET] = del;

        part->blocks[block].used_sectors--;

        if (!part->blocks[block].used_sectors &&
            !part->blocks[block].free_sectors)
                rc = erase_block(part, block);

err:
        return rc;
}
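/* Return the index of a SECTOR_FREE slot in the cached header map, or -1. */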
static int find_free_sector(const struct partition *part, const struct block *block)
{
        int i, stop;

        i = stop = part->data_sectors_per_block - block->free_sectors;

        do {
                if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
                                == SECTOR_FREE)
                        return i;

                if (++i == part->data_sectors_per_block)
                        i = 0;
        } while (i != stop);

        return -1;
}
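/*
 * Write one sector's payload into the current write block: pick a free slot,
 * write the data, then commit the map entry both on flash and in the cache.
 */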
static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
{
        struct partition *part = (struct partition*)dev;
        struct block *block;
        u_long addr;
        int i;
        int rc;
        size_t retlen;
        u16 entry;

        if (part->current_block == -1 ||
                !part->blocks[part->current_block].free_sectors) {

                rc = find_writable_block(part, old_addr);
                if (rc)
                        goto err;
        }

        block = &part->blocks[part->current_block];

        i = find_free_sector(part, block);

        if (i < 0) {
                rc = -ENOSPC;
                goto err;
        }

        addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
                block->offset;
        rc = part->mbd.mtd->write(part->mbd.mtd,
                addr, SECTOR_SIZE, &retlen, (u_char*)buf);

        if (!rc && retlen != SECTOR_SIZE)
                rc = -EIO;

        if (rc) {
                printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
                                part->mbd.mtd->name, addr);
                goto err;
        }

        part->sector_map[sector] = addr;

        entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);

        part->header_cache[i + HEADER_MAP_OFFSET] = entry;

        addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
        rc = part->mbd.mtd->write(part->mbd.mtd, addr,
                        sizeof(entry), &retlen, (u_char*)&entry);

        if (!rc && retlen != sizeof(entry))
                rc = -EIO;

        if (rc) {
                printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
                                part->mbd.mtd->name, addr);
                goto err;
        }

        block->used_sectors++;
        block->free_sectors--;

err:
        return rc;
}
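/*
 * Block-layer write: an all-zero sector is stored implicitly by unmapping it;
 * otherwise the data is written via do_writesect(). Any previous copy of the
 * sector is then marked deleted.
 */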
static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
        struct partition *part = (struct partition*)dev;
        u_long old_addr;
        int i;
        int rc = 0;

        pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);

        if (part->reserved_block == -1) {
                rc = -EACCES;
                goto err;
        }

        if (sector >= part->sector_count) {
                rc = -EIO;
                goto err;
        }

        old_addr = part->sector_map[sector];

        for (i=0; i<SECTOR_SIZE; i++) {
                if (!buf[i])
                        continue;

                rc = do_writesect(dev, sector, buf, &old_addr);
                if (rc)
                        goto err;
                break;
        }

        if (i == SECTOR_SIZE)
                part->sector_map[sector] = -1;

        if (old_addr != -1)
                rc = mark_sector_deleted(part, old_addr);

err:
        return rc;
}
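/* Report a simple 1-head, 63-sectors-per-track geometry to the block layer. */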
static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
        struct partition *part = (struct partition*)dev;

        geo->heads = 1;
        geo->sectors = SECTORS_PER_TRACK;
        geo->cylinders = part->cylinders;

        return 0;
}
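/*
 * Probe callback: attach to NOR devices only, work out the block size
 * (module parameter or erase size), and register a translated block device
 * if a valid RFD layout is found.
 */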
static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
        struct partition *part;

        if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
                return;

        part = kzalloc(sizeof(struct partition), GFP_KERNEL);
        if (!part)
                return;

        part->mbd.mtd = mtd;

        if (block_size)
                part->block_size = block_size;
        else {
                if (!mtd->erasesize) {
                        printk(KERN_WARNING PREFIX "please provide block_size\n");
                        goto out;
                } else
                        part->block_size = mtd->erasesize;
        }

        if (scan_header(part) == 0) {
                part->mbd.size = part->sector_count;
                part->mbd.tr = tr;
                part->mbd.devnum = -1;
                if (!(mtd->flags & MTD_WRITEABLE))
                        part->mbd.readonly = 1;
                else if (part->errors) {
                        printk(KERN_WARNING PREFIX "'%s': errors found, "
                                        "setting read-only\n", mtd->name);
                        part->mbd.readonly = 1;
                }

                printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
                                mtd->name, mtd->type, mtd->flags);

                if (!add_mtd_blktrans_dev((void*)part))
                        return;
        }
out:
        kfree(part);
}
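/* Tear down the translated block device and free all per-partition state. */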
static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
        struct partition *part = (struct partition*)dev;
        int i;

        for (i=0; i<part->total_blocks; i++) {
                pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
                        part->mbd.mtd->name, i, part->blocks[i].erases);
        }

        del_mtd_blktrans_dev(dev);
        vfree(part->sector_map);
        kfree(part->header_cache);
        kfree(part->blocks);
}
static struct mtd_blktrans_ops rfd_ftl_tr = {
        .name           = "rfd",
        .major          = RFD_FTL_MAJOR,
        .part_bits      = PART_BITS,
        .blksize        = SECTOR_SIZE,

        .readsect       = rfd_ftl_readsect,
        .writesect      = rfd_ftl_writesect,
        .getgeo         = rfd_ftl_getgeo,
        .add_mtd        = rfd_ftl_add_mtd,
        .remove_dev     = rfd_ftl_remove_dev,
        .owner          = THIS_MODULE,
};
static int __init init_rfd_ftl(void)
{
        return register_mtd_blktrans(&rfd_ftl_tr);
}

static void __exit cleanup_rfd_ftl(void)
{
        deregister_mtd_blktrans(&rfd_ftl_tr);
}

module_init(init_rfd_ftl);
module_exit(cleanup_rfd_ftl);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
                "used by General Software's Embedded BIOS");