/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
static DEFINE_MUTEX(block_mutex);
/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;
/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	name_idx;

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with mmc_set_drvdata, and keeps
	 * track of the current selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
};
static DEFINE_MUTEX(open_lock);
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}
static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devmaj = MAJOR(disk_devt(disk));
	int devidx = MINOR(disk_devt(disk)) / perdev_minors;

	if (!devmaj)
		devidx = disk->first_minor / perdev_minors;

	return devidx;
}
static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}
static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);

	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}
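
/*
 * Usage sketch (illustrative, not from the original source): the pair of
 * handlers above backs the /sys/block/mmcblk<X>/force_ro attribute, so
 * "echo 1 > /sys/block/mmcblk0/force_ro" forces the disk read-only and
 * "echo 0" restores the state derived from the write-protect switch.
 */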
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}
static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
	return 0;
}
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}
struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
			   idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}
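
/*
 * On success the returned idata and idata->buf are owned by the caller;
 * mmc_blk_ioctl_cmd() below frees both on its cmd_done path.
 */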
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
	struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {0};
	struct scatterlist sg;
	int err;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	data.sg = &sg;
	data.sg_len = 1;
	data.blksz = idata->ic.blksz;
	data.blocks = idata->ic.blocks;

	sg_init_one(data.sg, idata->buf, idata->buf_bytes);

	if (idata->ic.write_flag)
		data.flags = MMC_DATA_WRITE;
	else
		data.flags = MMC_DATA_READ;

	mrq.cmd = &cmd;
	mrq.data = &data;

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_done;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	mmc_claim_host(card->host);

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			goto cmd_rel_host;
	}

	/* data.flags must already be set before doing this. */
	mmc_set_data_timeout(&data, card);
	/* Allow overriding the timeout_ns for empirical tuning. */
	if (idata->ic.data_timeout_ns)
		data.timeout_ns = idata->ic.data_timeout_ns;

	if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
		/*
		 * Pretend this is a data transfer and rely on the host driver
		 * to compute timeout.  When all host drivers support
		 * cmd.cmd_timeout for R1B, this can be changed to:
		 *
		 *     mrq.data = NULL;
		 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
		 */
		data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
			__func__, cmd.error);
		err = cmd.error;
		goto cmd_rel_host;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
		err = data.error;
		goto cmd_rel_host;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
		err = -EFAULT;
		goto cmd_rel_host;
	}

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
				 idata->buf, idata->buf_bytes)) {
			err = -EFAULT;
			goto cmd_rel_host;
		}
	}

cmd_rel_host:
	mmc_release_host(card->host);

cmd_done:
	mmc_blk_put(md);
	kfree(idata->buf);
	kfree(idata);
	return err;
}
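
/*
 * Illustrative userspace sketch (an assumed example, not part of this
 * driver): issuing CMD13 (SEND_STATUS) through MMC_IOC_CMD on the whole
 * block device; "fd" and "rca" are hypothetical:
 *
 *	struct mmc_ioc_cmd ic = {0};
 *	ic.opcode = MMC_SEND_STATUS;
 *	ic.arg = rca << 16;
 *	ic.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 *	ret = ioctl(fd, MMC_IOC_CMD, &ic);
 */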
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	int ret = -EINVAL;
	if (cmd == MMC_IOC_CMD)
		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
	return ret;
}
#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif
static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};
struct mmc_blk_request {
	struct mmc_request	mrq;
	struct mmc_command	sbc;
	struct mmc_command	cmd;
	struct mmc_command	stop;
	struct mmc_data		data;
};
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = mmc_get_drvdata(card);
	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		card->ext_csd.part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
				 card->ext_csd.part_time);
		if (ret)
			return ret;
	}

	main_md->part_curr = md->part_type;
	return 0;
}
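
/*
 * Example derived from the code above: selecting boot partition 0 only
 * rewrites the access bits of EXT_CSD_PART_CONFIG, i.e.
 *
 *	part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
 *	part_config |= EXT_CSD_PART_CONFIG_ACC_BOOT0;
 */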
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	unsigned int timeout_us;

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.timeout_ns = card->csd.tacc_ns * 100;
	data.timeout_clks = card->csd.tacc_clks * 100;

	timeout_us = data.timeout_ns / 1000;
	timeout_us += data.timeout_clks * 1000 /
		(card->host->ios.clock / 1000);

	if (timeout_us > 100000) {
		data.timeout_ns = 100000000;
		data.timeout_clks = 0;
	}

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}
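
/*
 * ACMD22 (SEND_NUM_WR_BLKS) replies with a single 4-byte big-endian word
 * holding the number of well-written blocks, hence the 4-byte transfer
 * and the ntohl() above; (u32)-1 serves as the "unknown" sentinel.
 */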
static u32 get_card_status(struct mmc_card *card, struct request *req)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		printk(KERN_ERR "%s: error %d sending status command\n",
		       req->rq_disk->disk_name, err);
	return cmd.resp[0];
}
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;

	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	return err ? 0 : 1;
}
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0;

	if (!mmc_can_secure_erase_trim(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
	if (!err && arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out;
		}
		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
	}
out:
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	return err ? 0 : 1;
}
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;

	/*
	 * No-op, only service this because we need REQ_FUA for reliable
	 * writes.
	 */
	spin_lock_irq(&md->lock);
	__blk_end_request_all(req, 0);
	spin_unlock_irq(&md->lock);

	return 1;
}
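
/*
 * REQ_FLUSH only reaches this driver when the queue advertises flush
 * support, which mmc_blk_alloc_req() enables (via blk_queue_flush) for
 * cards with reliable write support; durability itself comes from the
 * reliable writes issued for REQ_FUA.
 */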
/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}
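
/*
 * Worked example of the legacy rules above, assuming rel_sectors == 8:
 * a 32-sector write at an 8-aligned address is clamped to 8 sectors per
 * pass, while an unaligned or shorter-than-8 write degrades to a single
 * sector.
 */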
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request brq;
	int ret = 1, disable_multi = 0;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * REQ_META accesses, and are supported only on MMCs.
	 */
	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
			  (req->cmd_flags & REQ_META)) &&
			 (rq_data_dir(req) == WRITE) &&
			 (md->flags & MMC_BLK_REL_WR);

	do {
		struct mmc_command cmd = {0};
		u32 readcmd, writecmd, status = 0;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		brq.cmd.arg = blk_rq_pos(req);
		if (!mmc_card_blockaddr(card))
			brq.cmd.arg <<= 9;
		brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
		brq.data.blksz = 512;
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		brq.data.blocks = blk_rq_sectors(req);

		/*
		 * The block layer doesn't support all sector count
		 * restrictions, so we need to be prepared for too big
		 * requests.
		 */
		if (brq.data.blocks > card->host->max_blk_count)
			brq.data.blocks = card->host->max_blk_count;

		/*
		 * After a read error, we redo the request one sector at a time
		 * in order to accurately determine which sectors can be read
		 * successfully.
		 */
		if (disable_multi && brq.data.blocks > 1)
			brq.data.blocks = 1;

		if (brq.data.blocks > 1 || do_rel_wr) {
			/* SPI multiblock writes terminate using a special
			 * token, not a STOP_TRANSMISSION request.
			 */
			if (!mmc_host_is_spi(card->host) ||
			    rq_data_dir(req) == READ)
				brq.mrq.stop = &brq.stop;
			readcmd = MMC_READ_MULTIPLE_BLOCK;
			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
		} else {
			brq.mrq.stop = NULL;
			readcmd = MMC_READ_SINGLE_BLOCK;
			writecmd = MMC_WRITE_BLOCK;
		}
		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = readcmd;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = writecmd;
			brq.data.flags |= MMC_DATA_WRITE;
		}

		if (do_rel_wr)
			mmc_apply_rel_rw(&brq, card, req);

		/*
		 * Pre-defined multi-block transfers are preferable to
		 * open ended-ones (and necessary for reliable writes).
		 * However, it is not sufficient to just send CMD23,
		 * and avoid the final CMD12, as on an error condition
		 * CMD12 (stop) needs to be sent anyway. This, coupled
		 * with Auto-CMD23 enhancements provided by some
		 * hosts, means that the complexity of dealing
		 * with this is best left to the host. If CMD23 is
		 * supported by card and host, we'll fill sbc in and let
		 * the host deal with handling it correctly. This means
		 * that for hosts that don't expose MMC_CAP_CMD23, no
		 * change of behavior will be observed.
		 *
		 * N.B: Some MMC cards experience perf degradation.
		 * We'll avoid using CMD23-bounded multiblock writes for
		 * these, while retaining features like reliable writes.
		 */
		if ((md->flags & MMC_BLK_CMD23) &&
		    mmc_op_multi(brq.cmd.opcode) &&
		    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
			brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
			brq.sbc.arg = brq.data.blocks |
				      (do_rel_wr ? (1 << 31) : 0);
			brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
			brq.mrq.sbc = &brq.sbc;
		}

		mmc_set_data_timeout(&brq.data, card);

		brq.data.sg = mq->sg;
		brq.data.sg_len = mmc_queue_map_sg(mq);

		/*
		 * Adjust the sg list so it is the same size as the
		 * request.
		 */
		if (brq.data.blocks != blk_rq_sectors(req)) {
			int i, data_size = brq.data.blocks << 9;
			struct scatterlist *sg;

			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
				data_size -= sg->length;
				if (data_size <= 0) {
					sg->length += data_size;
					i++;
					break;
				}
			}
			brq.data.sg_len = i;
		}

		mmc_queue_bounce_pre(mq);

		mmc_wait_for_req(card->host, &brq.mrq);

		mmc_queue_bounce_post(mq);

		/*
		 * Check for errors here, but don't jump to cmd_err
		 * until later as we need to wait for the card to leave
		 * programming mode even when things go wrong.
		 */
		if (brq.sbc.error || brq.cmd.error ||
		    brq.data.error || brq.stop.error) {
			if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
				/* Redo read one sector at a time */
				printk(KERN_WARNING "%s: retrying using single "
				       "block read\n", req->rq_disk->disk_name);
				disable_multi = 1;
				continue;
			}
			status = get_card_status(card, req);
		}

		if (brq.sbc.error) {
			printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
			       "command, response %#x, card status %#x\n",
			       req->rq_disk->disk_name, brq.sbc.error,
			       brq.sbc.resp[0], status);
		}

		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write "
			       "command, response %#x, card status %#x\n",
			       req->rq_disk->disk_name, brq.cmd.error,
			       brq.cmd.resp[0], status);
		}

		if (brq.data.error) {
			if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
				/* 'Stop' response contains card status */
				status = brq.mrq.stop->resp[0];
			printk(KERN_ERR "%s: error %d transferring data,"
			       " sector %u, nr %u, card status %#x\n",
			       req->rq_disk->disk_name, brq.data.error,
			       (unsigned)blk_rq_pos(req),
			       (unsigned)blk_rq_sectors(req), status);
		}

		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command, "
			       "response %#x, card status %#x\n",
			       req->rq_disk->disk_name, brq.stop.error,
			       brq.stop.resp[0], status);
		}

		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				err = mmc_wait_for_cmd(card->host, &cmd, 5);
				if (err) {
					printk(KERN_ERR "%s: error %d requesting status\n",
					       req->rq_disk->disk_name, err);
					goto cmd_err;
				}
				/*
				 * Some cards mishandle the status bits,
				 * so make sure to check both the busy
				 * indication and the card state.
				 */
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
				 (R1_CURRENT_STATE(cmd.resp[0]) == 7));

#if 0
			if (cmd.resp[0] & ~0x00000900)
				printk(KERN_ERR "%s: status = %08x\n",
				       req->rq_disk->disk_name, cmd.resp[0]);
			if (mmc_decode_status(cmd.resp))
				goto cmd_err;
#endif
		}

		if (brq.cmd.error || brq.stop.error || brq.data.error) {
			if (rq_data_dir(req) == READ) {
				/*
				 * After an error, we redo I/O one sector at a
				 * time, so we only reach here after trying to
				 * read a single sector.
				 */
				spin_lock_irq(&md->lock);
				ret = __blk_end_request(req, -EIO, brq.data.blksz);
				spin_unlock_irq(&md->lock);
				continue;
			}
			goto cmd_err;
		}

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	} while (ret);

	return 1;

 cmd_err:
	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			spin_lock_irq(&md->lock);
			ret = __blk_end_request(req, 0, blocks << 9);
			spin_unlock_irq(&md->lock);
		}
	} else {
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}

	spin_lock_irq(&md->lock);
	while (ret)
		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
	spin_unlock_irq(&md->lock);

	return 0;
}
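
/*
 * Note on the sbc path above: bit 31 of the SET_BLOCK_COUNT (CMD23)
 * argument requests a reliable write, so brq.sbc.arg carries both the
 * block count and, when do_rel_wr is set, the reliable write flag.
 */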
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;

	mmc_claim_host(card->host);
	ret = mmc_blk_part_switch(card, md);
	if (ret) {
		ret = 0;
		goto out;
	}

	if (req->cmd_flags & REQ_DISCARD) {
		if (req->cmd_flags & REQ_SECURE)
			ret = mmc_blk_issue_secdiscard_rq(mq, req);
		else
			ret = mmc_blk_issue_discard_rq(mq, req);
	} else if (req->cmd_flags & REQ_FLUSH) {
		ret = mmc_blk_issue_flush(mq, req);
	} else {
		ret = mmc_blk_issue_rw_rq(mq, req);
	}

out:
	mmc_release_host(card->host);
	return ret;
}
static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname)
{
	struct mmc_blk_data *md;
	int devidx, ret;

	devidx = find_first_zero_bit(dev_use, max_devices);
	if (devidx >= max_devices)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * !subname implies we are creating main mmc_blk_data that will be
	 * associated with mmc_card with mmc_set_drvdata. Due to device
	 * partitions, devidx will not coincide with a per-physical card
	 * index anymore so we keep track of a name index.
	 */
	if (!subname) {
		md->name_idx = find_first_zero_bit(name_use, max_devices);
		__set_bit(md->name_idx, name_use);
	} else
		md->name_idx = ((struct mmc_blk_data *)
				dev_to_disk(parent)->private_data)->name_idx;

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(perdev_minors);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	INIT_LIST_HEAD(&md->part);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
	if (ret)
		goto err_putdisk;

	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%d%s", md->name_idx, subname ? subname : "");

	blk_queue_logical_block_size(md->queue.queue, 512);
	set_capacity(md->disk, size);

	if (mmc_host_cmd23(card->host)) {
		if (mmc_card_mmc(card) ||
		    (mmc_card_sd(card) &&
		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
			md->flags |= MMC_BLK_CMD23;
	}

	if (mmc_card_mmc(card) &&
	    md->flags & MMC_BLK_CMD23 &&
	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
	     card->ext_csd.rel_sectors)) {
		md->flags |= MMC_BLK_REL_WR;
		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
	}

	return md;

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	return ERR_PTR(ret);
}
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	sector_t size;
	struct mmc_blk_data *md;

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		size = card->ext_csd.sectors;
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		size = card->csd.capacity << (card->csd.read_blkbits - 9);
	}

	md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
	return md;
}
static int mmc_blk_alloc_part(struct mmc_card *card,
			      struct mmc_blk_data *md,
			      unsigned int part_type,
			      sector_t size,
			      bool default_ro,
			      const char *subname)
{
	char cap_str[10];
	struct mmc_blk_data *part_md;

	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
				    subname);
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	part_md->part_type = part_type;
	list_add(&part_md->part, &md->part);

	string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	printk(KERN_INFO "%s: %s %s partition %u %s\n",
	       part_md->disk->disk_name, mmc_card_id(card),
	       mmc_card_name(card), part_md->part_type, cap_str);
	return 0;
}
static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int ret = 0;

	if (!mmc_card_mmc(card))
		return 0;

	if (card->ext_csd.boot_size) {
		ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
					 card->ext_csd.boot_size >> 9,
					 true,
					 "boot0");
		if (ret)
			return ret;
		ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
					 card->ext_csd.boot_size >> 9,
					 true,
					 "boot1");
		if (ret)
			return ret;
	}

	return ret;
}
*md
, struct mmc_card
*card
)
1163 mmc_claim_host(card
->host
);
1164 err
= mmc_set_blocklen(card
, 512);
1165 mmc_release_host(card
->host
);
1168 printk(KERN_ERR
"%s: unable to set block size to 512: %d\n",
1169 md
->disk
->disk_name
, err
);
static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
	if (md) {
		if (md->disk->flags & GENHD_FL_UP) {
			device_remove_file(disk_to_dev(md->disk), &md->force_ro);

			/* Stop new requests from getting into the queue */
			del_gendisk(md->disk);
		}

		/* Then flush out any already in there */
		mmc_cleanup_queue(&md->queue);
		mmc_blk_put(md);
	}
}
static void mmc_blk_remove_parts(struct mmc_card *card,
				 struct mmc_blk_data *md)
{
	struct list_head *pos, *q;
	struct mmc_blk_data *part_md;

	__clear_bit(md->name_idx, name_use);
	list_for_each_safe(pos, q, &md->part) {
		part_md = list_entry(pos, struct mmc_blk_data, part);
		list_del(pos);
		mmc_blk_remove_req(part_md);
	}
}
static int mmc_add_disk(struct mmc_blk_data *md)
{
	int ret;

	add_disk(md->disk);
	md->force_ro.show = force_ro_show;
	md->force_ro.store = force_ro_store;
	sysfs_attr_init(&md->force_ro.attr);
	md->force_ro.attr.name = "force_ro";
	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
	if (ret)
		del_gendisk(md->disk);

	return ret;
}
static const struct mmc_fixup blk_fixups[] =
{
	MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),

	/*
	 * Some MMC cards experience performance degradation with CMD23
	 * instead of CMD12-bounded multiblock transfers. For now we'll
	 * black list what's bad...
	 * - Certain Toshiba cards.
	 *
	 * N.B. This doesn't affect SD cards.
	 */
	MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),

	END_FIXUP
};
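
/*
 * MMC_FIXUP(name, manfid, oemid, hook, data) matches on the CID product
 * name plus manufacturer/OEM IDs; the "SEM*" entries above are believed
 * to be SanDisk iNAND parts, hence MMC_QUIRK_INAND_CMD38.
 */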
static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md, *part_md;
	int err;
	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	err = mmc_blk_set_blksize(md, card);
	if (err)
		goto out;

	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	printk(KERN_INFO "%s: %s %s %s %s\n",
	       md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
	       cap_str, md->read_only ? "(ro)" : "");

	if (mmc_blk_alloc_parts(card, md))
		goto out;

	mmc_set_drvdata(card, md);
	mmc_fixup_device(card, blk_fixups);

	if (mmc_add_disk(md))
		goto out;

	list_for_each_entry(part_md, &md->part, part) {
		if (mmc_add_disk(part_md))
			goto out;
	}
	return 0;

 out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	return err;
}
*card
)
1297 struct mmc_blk_data
*md
= mmc_get_drvdata(card
);
1299 mmc_blk_remove_parts(card
, md
);
1300 mmc_claim_host(card
->host
);
1301 mmc_blk_part_switch(card
, md
);
1302 mmc_release_host(card
->host
);
1303 mmc_blk_remove_req(md
);
1304 mmc_set_drvdata(card
, NULL
);
#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_queue_suspend(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_suspend(&part_md->queue);
		}
	}
	return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_blk_set_blksize(md, card);

		/*
		 * Resume involves the card going into idle state,
		 * so current partition is always the main one.
		 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
	}
	return 0;
}
#else
#define	mmc_blk_suspend	NULL
#define mmc_blk_resume	NULL
#endif
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};
static int __init mmc_blk_init(void)
{
	int res;

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);

	max_devices = 256 / perdev_minors;

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
 out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
	return res;
}
static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}
module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");