Linux 2.6.20.7
drivers/mtd/devices/block2mtd.c
/*
 * $Id: block2mtd.c,v 1.30 2005/11/29 14:48:32 gleixner Exp $
 *
 * block2mtd.c - create an mtd from a block device
 *
 * Copyright (C) 2001,2002	Simon Evans <spse@secret.org.uk>
 * Copyright (C) 2004-2006	Jörn Engel <joern@wh.fh-wedel.de>
 *
 * Licence: GPL
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/buffer_head.h>
#include <linux/mutex.h>
#include <linux/mount.h>

#define VERSION "$Revision: 1.30 $"


#define ERROR(fmt, args...) printk(KERN_ERR "block2mtd: " fmt "\n" , ## args)
#define INFO(fmt, args...) printk(KERN_INFO "block2mtd: " fmt "\n" , ## args)
/* Info for the block device */
struct block2mtd_dev {
	struct list_head list;
	struct block_device *blkdev;
	struct mtd_info mtd;
	struct mutex write_mutex;
};


/* Static info about the MTD, used in cleanup_module */
static LIST_HEAD(blkmtd_device_list);


#define PAGE_READAHEAD 64
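/*
 * Fill the page cache around @index: walk up to PAGE_READAHEAD page
 * slots, allocate any page not yet present in the cache, and read the
 * whole batch in via the mapping's ->readpage(), so the lookups done by
 * page_readahead() below are served from the cache.
 */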
static void cache_readahead(struct address_space *mapping, int index)
{
	filler_t *filler = (filler_t*)mapping->a_ops->readpage;
	int i, pagei;
	unsigned ret = 0;
	unsigned long end_index;
	struct page *page;
	LIST_HEAD(page_pool);
	struct inode *inode = mapping->host;
	loff_t isize = i_size_read(inode);

	if (!isize) {
		INFO("iSize=0 in cache_readahead\n");
		return;
	}

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	read_lock_irq(&mapping->tree_lock);
	for (i = 0; i < PAGE_READAHEAD; i++) {
		pagei = index + i;
		if (pagei > end_index) {
			INFO("Overrun end of disk in cache readahead\n");
			break;
		}
		page = radix_tree_lookup(&mapping->page_tree, pagei);
		if (page && (!i))
			break;
		if (page)
			continue;
		read_unlock_irq(&mapping->tree_lock);
		page = page_cache_alloc_cold(mapping);
		read_lock_irq(&mapping->tree_lock);
		if (!page)
			break;
		page->index = pagei;
		list_add(&page->lru, &page_pool);
		ret++;
	}
	read_unlock_irq(&mapping->tree_lock);
	if (ret)
		read_cache_pages(mapping, &page_pool, filler, NULL);
}
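
/* Look up a single page via the page cache, priming readahead around it first. */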
static struct page* page_readahead(struct address_space *mapping, int index)
{
	filler_t *filler = (filler_t*)mapping->a_ops->readpage;
	cache_readahead(mapping, index);
	return read_cache_page(mapping, index, filler, NULL);
}


/* erase a specified part of the device */
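/*
 * On a block device there is nothing to physically erase, so this just
 * writes 0xff over the affected pages; pages that already read back as
 * all 0xff are left alone to avoid needless writeback.
 */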
static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
	struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
	struct page *page;
	int index = to >> PAGE_SHIFT;	// page index
	int pages = len >> PAGE_SHIFT;
	u_long *p;
	u_long *max;

	while (pages) {
		page = page_readahead(mapping, index);
		if (!page)
			return -ENOMEM;
		if (IS_ERR(page))
			return PTR_ERR(page);

		/* scan one page worth of words; only dirty the page if it
		   is not already all 0xff */
		max = page_address(page) + PAGE_SIZE;
		for (p=(u_long*)page_address(page); p<max; p++)
			if (*p != -1UL) {
				lock_page(page);
				memset(page_address(page), 0xff, PAGE_SIZE);
				set_page_dirty(page);
				unlock_page(page);
				break;
			}

		page_cache_release(page);
		pages--;
		index++;
	}
	return 0;
}
static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct block2mtd_dev *dev = mtd->priv;
	size_t from = instr->addr;
	size_t len = instr->len;
	int err;

	instr->state = MTD_ERASING;
	mutex_lock(&dev->write_mutex);
	err = _block2mtd_erase(dev, from, len);
	mutex_unlock(&dev->write_mutex);
	if (err) {
		ERROR("erase failed err = %d", err);
		instr->state = MTD_ERASE_FAILED;
	} else
		instr->state = MTD_ERASE_DONE;

	mtd_erase_callback(instr);
	return err;
}


static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct block2mtd_dev *dev = mtd->priv;
	struct page *page;
	int index = from >> PAGE_SHIFT;
	int offset = from & (PAGE_SIZE-1);
	int cpylen;

	if (from > mtd->size)
		return -EINVAL;
	if (from + len > mtd->size)
		len = mtd->size - from;

	if (retlen)
		*retlen = 0;

	while (len) {
		if ((offset + len) > PAGE_SIZE)
			cpylen = PAGE_SIZE - offset;	// multiple pages
		else
			cpylen = len;	// this page
		len = len - cpylen;

		// Get page
		page = page_readahead(dev->blkdev->bd_inode->i_mapping, index);
		if (!page)
			return -ENOMEM;
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(buf, page_address(page) + offset, cpylen);
		page_cache_release(page);

		if (retlen)
			*retlen += cpylen;
		buf += cpylen;
		offset = 0;
		index++;
	}
	return 0;
}


/* write data to the underlying device */
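/*
 * Writes also go through the page cache: each affected page is compared
 * with the incoming data first, and only pages that actually change are
 * copied and marked dirty, so rewriting identical data causes no
 * writeback to the block device.
 */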
static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
		loff_t to, size_t len, size_t *retlen)
{
	struct page *page;
	struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
	int index = to >> PAGE_SHIFT;	// page index
	int offset = to & ~PAGE_MASK;	// page offset
	int cpylen;

	if (retlen)
		*retlen = 0;
	while (len) {
		if ((offset+len) > PAGE_SIZE)
			cpylen = PAGE_SIZE - offset;	// multiple pages
		else
			cpylen = len;	// this page
		len = len - cpylen;

		// Get page
		page = page_readahead(mapping, index);
		if (!page)
			return -ENOMEM;
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (memcmp(page_address(page)+offset, buf, cpylen)) {
			lock_page(page);
			memcpy(page_address(page) + offset, buf, cpylen);
			set_page_dirty(page);
			unlock_page(page);
		}
		page_cache_release(page);

		if (retlen)
			*retlen += cpylen;

		buf += cpylen;
		offset = 0;
		index++;
	}
	return 0;
}


static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct block2mtd_dev *dev = mtd->priv;
	int err;

	if (!len)
		return 0;
	if (to >= mtd->size)
		return -ENOSPC;
	if (to + len > mtd->size)
		len = mtd->size - to;

	mutex_lock(&dev->write_mutex);
	err = _block2mtd_write(dev, buf, to, len, retlen);
	mutex_unlock(&dev->write_mutex);
	if (err > 0)
		err = 0;
	return err;
}


/* sync the device - wait until the write queue is empty */
static void block2mtd_sync(struct mtd_info *mtd)
{
	struct block2mtd_dev *dev = mtd->priv;
	sync_blockdev(dev->blkdev);
	return;
}


static void block2mtd_free_device(struct block2mtd_dev *dev)
{
	if (!dev)
		return;

	kfree(dev->mtd.name);

	if (dev->blkdev) {
		invalidate_inode_pages(dev->blkdev->bd_inode->i_mapping);
		close_bdev_excl(dev->blkdev);
	}

	kfree(dev);
}
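

/*
 * Open the named block device exclusively, describe it as an MTD_RAM
 * device (size rounded down to a whole number of pages, erase size as
 * requested) and register it with the MTD core.  Returns the new device,
 * or NULL on any failure.
 */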
/* FIXME: ensure that mtd->size % erase_size == 0 */
static struct block2mtd_dev *add_device(char *devname, int erase_size)
{
	struct block_device *bdev;
	struct block2mtd_dev *dev;

	if (!devname)
		return NULL;

	dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	/* Get a handle on the device */
	bdev = open_bdev_excl(devname, O_RDWR, NULL);
#ifndef MODULE
	if (IS_ERR(bdev)) {

		/* We might not have rootfs mounted at this point. Try
		   to resolve the device name by other means. */

		dev_t dev = name_to_dev_t(devname);
		if (dev != 0) {
			bdev = open_by_devnum(dev, FMODE_WRITE | FMODE_READ);
		}
	}
#endif

	if (IS_ERR(bdev)) {
		ERROR("error: cannot open device %s", devname);
		goto devinit_err;
	}
	dev->blkdev = bdev;

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		ERROR("attempting to use an MTD device as a block device");
		goto devinit_err;
	}

	mutex_init(&dev->write_mutex);

	/* Setup the MTD structure */
	/* make the name contain the block device name */
	dev->mtd.name = kmalloc(sizeof("block2mtd: ") + strlen(devname),
			GFP_KERNEL);
	if (!dev->mtd.name)
		goto devinit_err;

	sprintf(dev->mtd.name, "block2mtd: %s", devname);

	dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
	dev->mtd.erasesize = erase_size;
	dev->mtd.writesize = 1;
	dev->mtd.type = MTD_RAM;
	dev->mtd.flags = MTD_CAP_RAM;
	dev->mtd.erase = block2mtd_erase;
	dev->mtd.write = block2mtd_write;
	dev->mtd.writev = default_mtd_writev;
	dev->mtd.sync = block2mtd_sync;
	dev->mtd.read = block2mtd_read;
	dev->mtd.priv = dev;
	dev->mtd.owner = THIS_MODULE;

	if (add_mtd_device(&dev->mtd)) {
		/* Device didn't get added, so free the entry */
		goto devinit_err;
	}
	list_add(&dev->list, &blkmtd_device_list);
	INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
			dev->mtd.name + strlen("block2mtd: "),
			dev->mtd.erasesize >> 10, dev->mtd.erasesize);
	return dev;

devinit_err:
	block2mtd_free_device(dev);
	return NULL;
}


/* This function works similarly to regular strtoul.  In addition, it
 * allows some suffixes for a more human-readable number format:
 * ki, Ki, kiB, KiB	- multiply result with 1024
 * Mi, MiB		- multiply result with 1024^2
 * Gi, GiB		- multiply result with 1024^3
 */
static int ustrtoul(const char *cp, char **endp, unsigned int base)
{
	unsigned long result = simple_strtoul(cp, endp, base);
	switch (**endp) {
	case 'G' :
		result *= 1024;
		/* fall through: 'G' also gets the 'M' and 'K' factors */
	case 'M':
		result *= 1024;
		/* fall through */
	case 'K':
	case 'k':
		result *= 1024;
	/* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
		if ((*endp)[1] == 'i') {
			if ((*endp)[2] == 'B')
				(*endp) += 3;
			else
				(*endp) += 2;
		}
	}
	return result;
}
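
/*
 * For example, ustrtoul("64KiB", &endp, 0) yields 64 * 1024 = 65536 and
 * leaves endp pointing past the "KiB" suffix, so parse_num() below
 * accepts it; any trailing junk after the number or suffix makes
 * parse_num() return -EINVAL.
 */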
static int parse_num(size_t *num, const char *token)
{
	char *endp;
	size_t n;

	n = (size_t) ustrtoul(token, &endp, 0);
	if (*endp)
		return -EINVAL;

	*num = n;
	return 0;
}


static inline void kill_final_newline(char *str)
{
	char *newline = strrchr(str, '\n');
	if (newline && !newline[1])
		*newline = 0;
}


#define parse_err(fmt, args...) do {		\
	ERROR("block2mtd: " fmt "\n", ## args);	\
	return 0;				\
} while (0)

#ifndef MODULE
static int block2mtd_init_called = 0;
static __initdata char block2mtd_paramline[80 + 12]; /* 80 for device, 12 for erase size */
#endif


static int block2mtd_setup2(const char *val)
{
	char buf[80 + 12]; /* 80 for device, 12 for erase size */
	char *str = buf;
	char *token[2];
	char *name;
	size_t erase_size = PAGE_SIZE;
	int i, ret;

	if (strnlen(val, sizeof(buf)) >= sizeof(buf))
		parse_err("parameter too long");

	strcpy(str, val);
	kill_final_newline(str);

	for (i = 0; i < 2; i++)
		token[i] = strsep(&str, ",");

	if (str)
		parse_err("too many arguments");

	if (!token[0])
		parse_err("no argument");

	name = token[0];
	if (strlen(name) + 1 > 80)
		parse_err("device name too long");

	if (token[1]) {
		ret = parse_num(&erase_size, token[1]);
		if (ret)
			parse_err("illegal erase size");
	}

	add_device(name, erase_size);

	return 0;
}


static int block2mtd_setup(const char *val, struct kernel_param *kp)
{
#ifdef MODULE
	return block2mtd_setup2(val);
#else
	/* If more parameters are later passed in via
	   /sys/module/block2mtd/parameters/block2mtd
	   and block2mtd_init() has already been called,
	   we can parse the argument now. */

	if (block2mtd_init_called)
		return block2mtd_setup2(val);

	/* During the early boot stage, we only save the parameters
	   here.  We must parse them later: if the param is passed
	   from the kernel boot command line, block2mtd_setup() is
	   called so early that it is not yet possible to resolve
	   the device (even kmalloc() fails).  Defer that work to
	   block2mtd_setup2(). */

	strlcpy(block2mtd_paramline, val, sizeof(block2mtd_paramline));

	return 0;
#endif
}


module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
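
/*
 * Illustrative usage only (the device name here is an example, not a
 * default):
 *
 *   modprobe block2mtd block2mtd=/dev/loop0,64KiB
 *
 * With the driver built in, the same string is typically given on the
 * kernel command line with the module-name prefix, i.e.
 * block2mtd.block2mtd=<dev>[,<erasesize>].
 */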

static int __init block2mtd_init(void)
{
	int ret = 0;
	INFO("version " VERSION);

#ifndef MODULE
	if (strlen(block2mtd_paramline))
		ret = block2mtd_setup2(block2mtd_paramline);
	block2mtd_init_called = 1;
#endif

	return ret;
}


static void __devexit block2mtd_exit(void)
{
	struct list_head *pos, *next;

	/* Remove the MTD devices */
	list_for_each_safe(pos, next, &blkmtd_device_list) {
		struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
		block2mtd_sync(&dev->mtd);
		del_mtd_device(&dev->mtd);
		INFO("mtd%d: [%s] removed", dev->mtd.index,
				dev->mtd.name + strlen("block2mtd: "));
		list_del(&dev->list);
		block2mtd_free_device(dev);
	}
}


module_init(block2mtd_init);
module_exit(block2mtd_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Simon Evans <spse@secret.org.uk> and others");
MODULE_DESCRIPTION("Emulate an MTD using a block device");