/*
 * Interface to Linux block layer for MTD 'translation layers'.
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>

#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

#include "mtdcore.h"
static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);
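
/*
 * Each mtd_blktrans_dev is refcounted: blktrans_ref_mutex serialises
 * lookups through gendisk->private_data against the final kref_put(),
 * so a concurrent open cannot race with device teardown.
 */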
static void blktrans_dev_release(struct kref *kref)
{
	struct mtd_blktrans_dev *dev =
		container_of(kref, struct mtd_blktrans_dev, ref);

	dev->disk->private_data = NULL;
	blk_cleanup_queue(dev->rq);
	put_disk(dev->disk);
	list_del(&dev->list);
	kfree(dev);
}
static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
	struct mtd_blktrans_dev *dev;

	mutex_lock(&blktrans_ref_mutex);
	dev = disk->private_data;

	if (!dev)
		goto unlock;
	kref_get(&dev->ref);
unlock:
	mutex_unlock(&blktrans_ref_mutex);
	return dev;
}
static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
	mutex_lock(&blktrans_ref_mutex);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&blktrans_ref_mutex);
}
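
/*
 * Translation-layer blocks are tr->blksize bytes (a power of two, so
 * tr->blkshift = ffs(tr->blksize) - 1).  The block layer hands us 512-byte
 * sectors; for example, with blksize 4096 (blkshift 12) a request starting
 * at sector 24 maps to block (24 << 9) >> 12 = 3.
 */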
static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
					struct mtd_blktrans_dev *dev,
					struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = blk_rq_pos(req) << 9 >> tr->blkshift;
	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
	buf = bio_data(req->bio);

	if (req_op(req) == REQ_OP_FLUSH) {
		if (tr->flush(dev))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	}

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return BLK_STS_IOERR;

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		if (tr->discard(dev, block, nsect))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	case REQ_OP_READ:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return BLK_STS_IOERR;
		rq_flush_dcache_pages(req);
		return BLK_STS_OK;
	case REQ_OP_WRITE:
		if (!tr->writesect)
			return BLK_STS_IOERR;

		rq_flush_dcache_pages(req);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return BLK_STS_IOERR;
		return BLK_STS_OK;
	default:
		return BLK_STS_IOERR;
	}
}
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
	return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
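
/*
 * All I/O is funnelled through a per-device workqueue: the worker pulls
 * requests off the legacy request queue (under rq->queue_lock), drops the
 * lock to perform the actual MTD I/O under dev->lock, then retakes the
 * lock to complete the request.
 */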
static void mtd_blktrans_work(struct work_struct *work)
{
	struct mtd_blktrans_dev *dev =
		container_of(work, struct mtd_blktrans_dev, work);
	struct mtd_blktrans_ops *tr = dev->tr;
	struct request_queue *rq = dev->rq;
	struct request *req = NULL;
	int background_done = 0;

	spin_lock_irq(rq->queue_lock);

	while (1) {
		blk_status_t res;

		dev->bg_stop = false;
		if (!req && !(req = blk_fetch_request(rq))) {
			if (tr->background && !background_done) {
				spin_unlock_irq(rq->queue_lock);
				mutex_lock(&dev->lock);
				tr->background(dev);
				mutex_unlock(&dev->lock);
				spin_lock_irq(rq->queue_lock);
				/*
				 * Do background processing just once per idle
				 * period.
				 */
				background_done = !dev->bg_stop;
				continue;
			}
			break;
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(dev->tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;

		background_done = 0;
	}

	spin_unlock_irq(rq->queue_lock);
}
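
/*
 * request_fn callback: runs with rq->queue_lock held, so it must not
 * sleep.  It simply kicks the workqueue, or fails all requests if the
 * device has already been torn down and queuedata cleared.
 */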
static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_dev *dev;
	struct request *req = NULL;

	dev = rq->queuedata;

	if (!dev)
		while ((req = blk_fetch_request(rq)) != NULL)
			__blk_end_request_all(req, BLK_STS_IOERR);
	else
		queue_work(dev->wq, &dev->work);
}
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = 0;

	if (!dev)
		return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/

	mutex_lock(&mtd_table_mutex);
	mutex_lock(&dev->lock);

	if (dev->open)
		goto unlock;

	kref_get(&dev->ref);
	__module_get(dev->tr->owner);

	if (!dev->mtd)
		goto unlock;

	if (dev->tr->open) {
		ret = dev->tr->open(dev);
		if (ret)
			goto error_put;
	}

	ret = __get_mtd_device(dev->mtd);
	if (ret)
		goto error_release;
	dev->file_mode = mode;

unlock:
	dev->open++;
	mutex_unlock(&dev->lock);
	mutex_unlock(&mtd_table_mutex);
	blktrans_dev_put(dev);
	return ret;

error_release:
	if (dev->tr->release)
		dev->tr->release(dev);
error_put:
	module_put(dev->tr->owner);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&dev->lock);
	mutex_unlock(&mtd_table_mutex);
	blktrans_dev_put(dev);
	return ret;
}
static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);

	if (!dev)
		return;

	mutex_lock(&mtd_table_mutex);
	mutex_lock(&dev->lock);

	if (--dev->open)
		goto unlock;

	kref_put(&dev->ref, blktrans_dev_release);
	module_put(dev->tr->owner);

	if (dev->mtd) {
		if (dev->tr->release)
			dev->tr->release(dev);
		__put_mtd_device(dev->mtd);
	}
unlock:
	mutex_unlock(&dev->lock);
	mutex_unlock(&mtd_table_mutex);
	blktrans_dev_put(dev);
}
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}
static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
			  unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	switch (cmd) {
	case BLKFLSBUF:
		ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
		break;
	default:
		ret = -ENOTTY;
	}
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}
static const struct block_device_operations mtd_block_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};
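
/*
 * Device numbers are allocated from tr->devs, which is kept sorted by
 * devnum; passing devnum == -1 requests the first free slot.
 * Partitionable translation layers (tr->part_bits != 0) get disc-style
 * names such as "ftla", "ftlb", ... "ftlz", "ftlaa"; others are simply
 * "<name><devnum>", e.g. "mtdblock0".
 */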
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;
	int ret;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	mutex_lock(&blktrans_ref_mutex);
	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			mutex_unlock(&blktrans_ref_mutex);
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}

	ret = -EBUSY;
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Check that the device and any partitions will get valid
	 * minor numbers and that the disk naming code below can cope
	 * with this number. */
	if (new->devnum > (MINORMASK >> tr->part_bits) ||
	    (tr->part_bits && new->devnum >= 27 * 26)) {
		mutex_unlock(&blktrans_ref_mutex);
		goto error1;
	}

	list_add_tail(&new->list, &tr->devs);
 added:
	mutex_unlock(&blktrans_ref_mutex);

	mutex_init(&new->lock);
	kref_init(&new->ref);
	if (!tr->writesect)
		new->readonly = 1;

	/* Create gendisk */
	ret = -ENOMEM;
	gd = alloc_disk(1 << tr->part_bits);

	if (!gd)
		goto error2;

	new->disk = gd;
	gd->private_data = new;
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_block_ops;

	if (tr->part_bits)
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	else
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);

	set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);

	/* Create the request queue */
	spin_lock_init(&new->queue_lock);
	new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);

	if (!new->rq)
		goto error3;

	if (tr->flush)
		blk_queue_write_cache(new->rq, true, false);

	new->rq->queuedata = new;
	blk_queue_logical_block_size(new->rq, tr->blksize);

	blk_queue_bounce_limit(new->rq, BLK_BOUNCE_HIGH);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq);

	if (tr->discard) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
		blk_queue_max_discard_sectors(new->rq, UINT_MAX);
	}

	gd->queue = new->rq;

	/* Create processing workqueue */
	new->wq = alloc_workqueue("%s%d", 0, 0,
				  tr->name, new->mtd->index);
	if (!new->wq)
		goto error4;
	INIT_WORK(&new->work, mtd_blktrans_work);

	if (new->readonly)
		set_disk_ro(gd, 1);

	device_add_disk(&new->mtd->dev, gd);

	if (new->disk_attributes) {
		ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
					 new->disk_attributes);
		WARN_ON(ret);
	}
	return 0;
error4:
	blk_cleanup_queue(new->rq);
error3:
	put_disk(new->disk);
error2:
	list_del(&new->list);
error1:
	return ret;
}
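
/*
 * Teardown order matters here: unregister the gendisk first so no new
 * opens or requests can arrive, flush the workqueue so in-flight I/O
 * finishes, then fail anything still sitting on the queue before the
 * MTD reference is dropped.
 */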
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	unsigned long flags;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	if (old->disk_attributes)
		sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
				   old->disk_attributes);

	/* Stop new requests to arrive */
	del_gendisk(old->disk);

	/* Stop workqueue. This will perform any pending request. */
	destroy_workqueue(old->wq);

	/* Kill current requests */
	spin_lock_irqsave(&old->queue_lock, flags);
	old->rq->queuedata = NULL;
	blk_start_queue(old->rq);
	spin_unlock_irqrestore(&old->queue_lock, flags);

	/* If the device is currently open, tell trans driver to close it,
	   then put mtd device, and don't touch it again */
	mutex_lock(&old->lock);
	if (old->open) {
		if (old->tr->release)
			old->tr->release(old);
		__put_mtd_device(old->mtd);
	}

	old->mtd = NULL;

	mutex_unlock(&old->lock);
	blktrans_dev_put(old);
	return 0;
}
static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;
	struct mtd_blktrans_dev *dev, *next;

	list_for_each_entry(tr, &blktrans_majors, list)
		list_for_each_entry_safe(dev, next, &tr->devs, list)
			if (dev->mtd == mtd)
				tr->remove_dev(dev);
}
static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}
static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};
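
/*
 * Illustrative sketch only (not part of this driver): a minimal,
 * read-only translation layer registers itself roughly as below.  The
 * names "simple_*" are hypothetical; real users of this API are
 * mtdblock, mtdblock_ro, ftl, nftl, rfd_ftl, etc.
 *
 *	static int simple_readsect(struct mtd_blktrans_dev *dev,
 *				   unsigned long block, char *buf)
 *	{
 *		size_t retlen;
 *
 *		// blksize is 512 below, so 'block' is a 512-byte sector index
 *		return mtd_read(dev->mtd, block << 9, 512, &retlen, buf);
 *	}
 *
 *	static void simple_add_mtd(struct mtd_blktrans_ops *tr,
 *				   struct mtd_info *mtd)
 *	{
 *		struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *		if (!dev)
 *			return;
 *		dev->mtd = mtd;
 *		dev->devnum = mtd->index;
 *		dev->size = mtd->size >> 9;	// capacity in blksize units
 *		dev->tr = tr;
 *		if (add_mtd_blktrans_dev(dev))
 *			kfree(dev);
 *	}
 *
 *	static void simple_remove_dev(struct mtd_blktrans_dev *dev)
 *	{
 *		del_mtd_blktrans_dev(dev);
 *	}
 *
 *	static struct mtd_blktrans_ops simple_tr = {
 *		.name		= "simple",
 *		.major		= 0,		// 0 => dynamic major
 *		.part_bits	= 0,
 *		.blksize	= 512,
 *		.readsect	= simple_readsect,
 *		.add_mtd	= simple_add_mtd,
 *		.remove_dev	= simple_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	// in the layer's module_init():
 *	//	return register_mtd_blktrans(&simple_tr);
 */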
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_info *mtd;
	int ret;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from messing
	   us up. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret < 0) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}

	if (ret)
		tr->major = ret;

	tr->blkshift = ffs(tr->blksize) - 1;

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	mtd_for_each_device(mtd)
		if (mtd->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd);

	mutex_unlock(&mtd_table_mutex);
	return 0;
}
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	unregister_blkdev(tr->major, tr->name);
	mutex_unlock(&mtd_table_mutex);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}
static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}
module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");