/*
 * Interface to Linux block layer for MTD 'translation layers'.
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

#include "mtdcore.h"	/* for mtd_table_mutex, used throughout */
static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);
static void blktrans_dev_release(struct kref *kref)
{
	struct mtd_blktrans_dev *dev =
		container_of(kref, struct mtd_blktrans_dev, ref);

	dev->disk->private_data = NULL;
	blk_cleanup_queue(dev->rq);
	put_disk(dev->disk);
	list_del(&dev->list);
	kfree(dev);
}
static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
	struct mtd_blktrans_dev *dev;

	mutex_lock(&blktrans_ref_mutex);
	dev = disk->private_data;

	if (!dev)
		goto unlock;
	kref_get(&dev->ref);
unlock:
	mutex_unlock(&blktrans_ref_mutex);
	return dev;
}
static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
	mutex_lock(&blktrans_ref_mutex);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&blktrans_ref_mutex);
}
static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	/* Convert from the block layer's 512-byte sectors to the
	   translation layer's block size. */
	block = blk_rq_pos(req) << 9 >> tr->blkshift;
	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

	if (req_op(req) == REQ_OP_FLUSH) {
		if (tr->flush(dev))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	}

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return BLK_STS_IOERR;

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		if (tr->discard(dev, block, nsect))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	case REQ_OP_READ:
		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
			if (tr->readsect(dev, block, buf)) {
				kunmap(bio_page(req->bio));
				return BLK_STS_IOERR;
			}
		}
		kunmap(bio_page(req->bio));
		rq_flush_dcache_pages(req);
		return BLK_STS_OK;
	case REQ_OP_WRITE:
		if (!tr->writesect)
			return BLK_STS_IOERR;

		rq_flush_dcache_pages(req);
		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
			if (tr->writesect(dev, block, buf)) {
				kunmap(bio_page(req->bio));
				return BLK_STS_IOERR;
			}
		}
		kunmap(bio_page(req->bio));
		return BLK_STS_OK;
	default:
		return BLK_STS_IOERR;
	}
}
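/*
 * Worked example of the arithmetic above (illustrative figures only):
 * for a translation layer with tr->blksize == 4096, register_mtd_blktrans()
 * computes tr->blkshift == ffs(4096) - 1 == 12, so a request starting at
 * 512-byte sector 8 maps to block (8 << 9) >> 12 == 1, and a 4096-byte
 * current chunk gives nsect == 4096 >> 12 == 1. With the common blksize
 * of 512, blkshift is 9 and block numbers equal sector numbers.
 */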
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
	return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
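/*
 * Usage sketch (not from this file): a translation layer's optional
 * ->background() callback, run from mtd_blktrans_work() while the queue
 * is idle, is expected to poll mtd_blktrans_cease_background() and return
 * once it reads true, i.e. once new I/O has been queued. Roughly, with
 * foo_gc_one_step() standing in for a hypothetical driver helper:
 *
 *	static void foo_background(struct mtd_blktrans_dev *dev)
 *	{
 *		while (foo_gc_one_step(dev))
 *			if (mtd_blktrans_cease_background(dev))
 *				return;
 *	}
 */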
static void mtd_blktrans_work(struct work_struct *work)
{
	struct mtd_blktrans_dev *dev =
		container_of(work, struct mtd_blktrans_dev, work);
	struct mtd_blktrans_ops *tr = dev->tr;
	struct request_queue *rq = dev->rq;
	struct request *req = NULL;
	int background_done = 0;

	spin_lock_irq(rq->queue_lock);

	while (1) {
		blk_status_t res;

		dev->bg_stop = false;
		if (!req && !(req = blk_fetch_request(rq))) {
			if (tr->background && !background_done) {
				spin_unlock_irq(rq->queue_lock);
				mutex_lock(&dev->lock);
				tr->background(dev);
				mutex_unlock(&dev->lock);
				spin_lock_irq(rq->queue_lock);
				/*
				 * Do background processing just once per idle
				 * period.
				 */
				background_done = !dev->bg_stop;
				continue;
			}
			break;
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(dev->tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;

		background_done = 0;
	}

	spin_unlock_irq(rq->queue_lock);
}
static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_dev *dev;
	struct request *req = NULL;

	dev = rq->queuedata;

	/* Error out requests for a dead device; otherwise signal a running
	   ->background() to yield and punt the work to the workqueue. */
	if (!dev)
		while ((req = blk_fetch_request(rq)) != NULL)
			__blk_end_request_all(req, BLK_STS_IOERR);
	else {
		dev->bg_stop = true;
		queue_work(dev->wq, &dev->work);
	}
}
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = 0;

	if (!dev)
		return -ERESTARTSYS; /* FIXME: busy loop! -arnd */

	mutex_lock(&mtd_table_mutex);
	mutex_lock(&dev->lock);

	if (dev->open)
		goto unlock;

	kref_get(&dev->ref);
	__module_get(dev->tr->owner);

	if (!dev->mtd)
		goto unlock;

	if (dev->tr->open) {
		ret = dev->tr->open(dev);
		if (ret)
			goto error_put;
	}

	ret = __get_mtd_device(dev->mtd);
	if (ret)
		goto error_release;
	dev->file_mode = mode;

unlock:
	dev->open++;
	mutex_unlock(&dev->lock);
	mutex_unlock(&mtd_table_mutex);
	blktrans_dev_put(dev);
	return ret;

error_release:
	if (dev->tr->release)
		dev->tr->release(dev);
error_put:
	module_put(dev->tr->owner);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&dev->lock);
	mutex_unlock(&mtd_table_mutex);
	blktrans_dev_put(dev);
	return ret;
}
static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);

	if (!dev)
		return;

	mutex_lock(&mtd_table_mutex);
	mutex_lock(&dev->lock);

	if (--dev->open)
		goto unlock;

	kref_put(&dev->ref, blktrans_dev_release);
	module_put(dev->tr->owner);

	if (dev->mtd) {
		if (dev->tr->release)
			dev->tr->release(dev);
		__put_mtd_device(dev->mtd);
	}
unlock:
	mutex_unlock(&dev->lock);
	mutex_unlock(&mtd_table_mutex);
	blktrans_dev_put(dev);
}
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}
static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	switch (cmd) {
	case BLKFLSBUF:
		ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
		break;
	default:
		ret = -ENOTTY;
	}
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}
static const struct block_device_operations mtd_block_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;
	int ret;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	mutex_lock(&blktrans_ref_mutex);
	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			mutex_unlock(&blktrans_ref_mutex);
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}

	ret = -EBUSY;
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Check that the device and any partitions will get valid
	 * minor numbers and that the disk naming code below can cope
	 * with this number. */
	if (new->devnum > (MINORMASK >> tr->part_bits) ||
	    (tr->part_bits && new->devnum >= 27 * 26)) {
		mutex_unlock(&blktrans_ref_mutex);
		goto error1;
	}

	list_add_tail(&new->list, &tr->devs);
 added:
	mutex_unlock(&blktrans_ref_mutex);

	mutex_init(&new->lock);
	kref_init(&new->ref);
	if (!tr->writesect)
		new->readonly = 1;

	/* Create gendisk */
	ret = -ENOMEM;
	gd = alloc_disk(1 << tr->part_bits);

	if (!gd)
		goto error2;

	new->disk = gd;
	gd->private_data = new;
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_block_ops;

	if (tr->part_bits) {
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	} else {
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);
	}

	set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);

	/* Create the request queue */
	spin_lock_init(&new->queue_lock);
	new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);

	if (!new->rq)
		goto error3;

	if (tr->flush)
		blk_queue_write_cache(new->rq, true, false);

	new->rq->queuedata = new;
	blk_queue_logical_block_size(new->rq, tr->blksize);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);

	if (tr->discard) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
		blk_queue_max_discard_sectors(new->rq, UINT_MAX);
	}

	gd->queue = new->rq;

	/* Create processing workqueue */
	new->wq = alloc_workqueue("%s%d", 0, 0,
				  tr->name, new->mtd->index);
	if (!new->wq)
		goto error4;
	INIT_WORK(&new->work, mtd_blktrans_work);

	if (new->readonly)
		set_disk_ro(gd, 1);

	device_add_disk(&new->mtd->dev, gd);

	if (new->disk_attributes) {
		ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
					new->disk_attributes);
		WARN_ON(ret);
	}
	return 0;
error4:
	blk_cleanup_queue(new->rq);
error3:
	put_disk(new->disk);
error2:
	list_del(&new->list);
error1:
	return ret;
}
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	unsigned long flags;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	if (old->disk_attributes)
		sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
						old->disk_attributes);

	/* Stop new requests from arriving */
	del_gendisk(old->disk);

	/* Stop workqueue. This will perform any pending request. */
	destroy_workqueue(old->wq);

	/* Kill current requests */
	spin_lock_irqsave(&old->queue_lock, flags);
	old->rq->queuedata = NULL;
	blk_start_queue(old->rq);
	spin_unlock_irqrestore(&old->queue_lock, flags);

	/* If the device is currently open, tell trans driver to close it,
	   then put mtd device, and don't touch it again */
	mutex_lock(&old->lock);
	if (old->open) {
		if (old->tr->release)
			old->tr->release(old);
		__put_mtd_device(old->mtd);
	}

	old->mtd = NULL;

	mutex_unlock(&old->lock);
	blktrans_dev_put(old);
	return 0;
}
static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;
	struct mtd_blktrans_dev *dev, *next;

	list_for_each_entry(tr, &blktrans_majors, list)
		list_for_each_entry_safe(dev, next, &tr->devs, list)
			if (dev->mtd == mtd)
				tr->remove_dev(dev);
}
static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}
static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_info *mtd;
	int ret;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from fucking
	   us over. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret < 0) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}

	if (ret)
		tr->major = ret;

	tr->blkshift = ffs(tr->blksize) - 1;

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	mtd_for_each_device(mtd)
		if (mtd->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd);

	mutex_unlock(&mtd_table_mutex);
	return 0;
}
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	unregister_blkdev(tr->major, tr->name);
	mutex_unlock(&mtd_table_mutex);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}
static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);
EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
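/*
 * Illustrative usage sketch (not part of this file): a minimal read-only
 * translation layer built on the interface exported above, modelled loosely
 * on mtdblock_ro. All "myflash_*" names are hypothetical.
 *
 *	static int myflash_readsect(struct mtd_blktrans_dev *dev,
 *				    unsigned long block, char *buf)
 *	{
 *		size_t retlen;
 *
 *		if (mtd_read(dev->mtd, block * 512, 512, &retlen, buf))
 *			return 1;
 *		return 0;
 *	}
 *
 *	static void myflash_add_mtd(struct mtd_blktrans_ops *tr,
 *				    struct mtd_info *mtd)
 *	{
 *		struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *		if (!dev)
 *			return;
 *		dev->mtd = mtd;
 *		dev->devnum = mtd->index;
 *		dev->size = mtd->size >> 9;	(size in .blksize units, 512 here)
 *		dev->tr = tr;
 *		dev->readonly = 1;
 *		if (add_mtd_blktrans_dev(dev))
 *			kfree(dev);
 *	}
 *
 *	static void myflash_remove_dev(struct mtd_blktrans_dev *dev)
 *	{
 *		del_mtd_blktrans_dev(dev);
 *	}
 *
 *	static struct mtd_blktrans_ops myflash_tr = {
 *		.name		= "myflash",
 *		.major		= 0,	(0 asks register_blkdev() for a dynamic major)
 *		.part_bits	= 0,
 *		.blksize	= 512,
 *		.readsect	= myflash_readsect,
 *		.add_mtd	= myflash_add_mtd,
 *		.remove_dev	= myflash_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * A module using this would then call register_mtd_blktrans(&myflash_tr)
 * from its init function and deregister_mtd_blktrans(&myflash_tr) on exit.
 */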