/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/uaccess.h>
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;
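/*
 * For example, with the usual CONFIG_MMC_BLOCK_MINORS default of 8 this
 * allows 256 / 8 = 32 mmcblk devices; a larger per-device minor count
 * (more partitions per card) proportionally reduces how many cards can
 * be handled.
 */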
/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	name_idx;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with mmc_set_drvdata, and keeps
	 * track of the currently selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	int	area_type;
};
static DEFINE_MUTEX(open_lock);

enum mmc_blk_status {
	MMC_BLK_SUCCESS = 0,
	MMC_BLK_PARTIAL,
	MMC_BLK_CMD_ERR,
	MMC_BLK_RETRY,
	MMC_BLK_ABORT,
	MMC_BLK_DATA_ERR,
	MMC_BLK_ECC_ERR,
	MMC_BLK_NOMEDIUM,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}
static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devmaj = MAJOR(disk_devt(disk));
	int devidx = MINOR(disk_devt(disk)) / perdev_minors;

	if (!devmaj)
		devidx = disk->first_minor / perdev_minors;
	return devidx;
}
static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}
static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	return ret;
}
static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_card *card;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	card = md->queue.card;

	mmc_claim_host(card->host);

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				card->ext_csd.boot_ro_lock |
				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				card->ext_csd.part_time);
	if (ret)
		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
	else
		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;

	mmc_release_host(card->host);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}

	mmc_blk_put(md);
	return count;
}
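/*
 * mmc_add_disk() below registers this attribute as
 * "ro_lock_until_next_power_on" on boot-partition disks, so e.g. writing
 * 1 to /sys/block/mmcblk0boot0/ro_lock_until_next_power_on (device name
 * illustrative) makes that boot area read-only until the next power
 * cycle.
 */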
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}
static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);

	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}
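/*
 * mmc_add_disk() below registers this attribute as "force_ro" on the
 * disk device, so the read-only state can be toggled from userspace,
 * e.g. "echo 1 > /sys/block/mmcblk0/force_ro" (device name illustrative).
 */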
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}
static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);

	return 0;
}
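/*
 * The card has no real CHS geometry; mmc_blk_getgeo() below simply
 * fabricates a fixed 4-head, 16-sector layout so that tools which still
 * ask for a geometry get a consistent answer.
 */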
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}
struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes)
		return idata;

	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
					idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
	struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err = 0;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout.  When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	mmc_claim_host(card->host);

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			goto cmd_rel_host;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
						__func__, cmd.error);
		err = cmd.error;
		goto cmd_rel_host;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
						__func__, data.error);
		err = data.error;
		goto cmd_rel_host;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
		err = -EFAULT;
		goto cmd_rel_host;
	}

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
						idata->buf, idata->buf_bytes)) {
			err = -EFAULT;
			goto cmd_rel_host;
		}
	}

cmd_rel_host:
	mmc_release_host(card->host);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	kfree(idata->buf);
	kfree(idata);
	return err;
}
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	int ret = -EINVAL;
	if (cmd == MMC_IOC_CMD)
		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
	return ret;
}
#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif
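/*
 * For reference, a minimal sketch of how userspace might exercise the
 * MMC_IOC_CMD path above.  The file descriptor, RCA value and response
 * flag encoding are assumptions for the example, not something this
 * driver defines:
 *
 *	struct mmc_ioc_cmd ic = {0};
 *
 *	ic.opcode = 13;			// CMD13, SEND_STATUS
 *	ic.arg = rca << 16;		// card's relative address
 *	ic.flags = ...;			// R1 response flags for this host
 *	ic.write_flag = 0;
 *	ic.blksz = 0;
 *	ic.blocks = 0;			// no data phase
 *
 *	// fd must be the whole device (e.g. /dev/mmcblk0), opened by a
 *	// caller with CAP_SYS_RAWIO, as enforced in mmc_blk_ioctl_cmd().
 *	if (ioctl(fd, MMC_IOC_CMD, &ic) == 0)
 *		status = ic.response[0];
 */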
static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = mmc_get_drvdata(card);

	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret)
			return ret;

		card->ext_csd.part_config = part_config;
	}

	main_md->part_curr = md->part_type;
	return 0;
}
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}
static int send_stop(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 5);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}
static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}
#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "response CRC error",
			name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid)
			return ERR_RETRY;

		/*
		 * If it was a r/w cmd crc error, or illegal command
		 * (eg, issued in wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
			return ERR_RETRY;

		/* Otherwise abort the command */
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}
/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, int *ecc_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = 1;

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card, &stop_status);
		if (err)
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);

		/*
		 * If the stop cmd also timed out, the card is probably
		 * not present, so abort.  Other errors are bad news too.
		 */
		if (err)
			return ERR_ABORT;
		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = 1;
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
	       req->rq_disk->disk_name, brq->stop.error,
	       brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	if (stop_status) {
		brq->stop.resp[0] = stop_status;
		brq->stop.error = 0;
	}
	return ERR_CONTINUE;
}
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg, trim_arg, erase_arg;
	int err = 0, type = MMC_BLK_SECDISCARD;

	if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	/* The sanitize operation is supported at v4.5 only */
	if (mmc_can_sanitize(card)) {
		erase_arg = MMC_ERASE_ARG;
		trim_arg = MMC_TRIM_ARG;
	} else {
		erase_arg = MMC_SECURE_ERASE_ARG;
		trim_arg = MMC_SECURE_TRIM1_ARG;
	}

	if (mmc_erase_group_aligned(card, from, nr))
		arg = erase_arg;
	else if (mmc_can_trim(card))
		arg = trim_arg;
	else {
		err = -EINVAL;
		goto out;
	}
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err)
		goto out;

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err)
			goto out;
	}

	if (mmc_can_sanitize(card))
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_SANITIZE_START, 1, 0);
out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	if (ret)
		ret = -EIO;

	blk_end_request_all(req, ret);

	return ret ? 0 : 1;
}
/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}
#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */
static int mmc_blk_err_check(struct mmc_card *card,
			     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    mmc_active);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mq_mrq->req;
	int ecc_err = 0;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command.  No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command.  No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command.  Data
	 * may have been transferred, or may still be transferring.
	 */
	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_NOMEDIUM:
			return MMC_BLK_NOMEDIUM;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors.  No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind.  If it was a write, we may have transitioned to
	 * program mode, which we have to wait for to complete.
	 */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		u32 status;
		do {
			int err = get_card_status(card, &status, 5);
			if (err) {
				pr_err("%s: error %d requesting status\n",
				       req->rq_disk->disk_name, err);
				return MMC_BLK_CMD_ERR;
			}
			/*
			 * Some cards mishandle the status bits,
			 * so make sure to check both the busy
			 * indication and the card state.
			 */
		} while (!(status & R1_READY_FOR_DATA) ||
			 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
	}

	if (brq->data.error) {
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}

	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;

	return MMC_BLK_SUCCESS;
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct mmc_blk_data *md = mq->data;
	bool do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * REQ_META accesses, and are supported only on MMCs.
	 *
	 * XXX: this really needs a good explanation of why REQ_META
	 * is treated special.
	 */
	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
			  (req->cmd_flags & REQ_META)) &&
		(rq_data_dir(req) == WRITE) &&
		(md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
	brq->data.blksz = 512;
	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	brq->data.blocks = blk_rq_sectors(req);

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/* Some controllers can't do multiblock reads due to hw bugs */
		if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
		    rq_data_dir(req) == READ)
			brq->data.blocks = 1;
	}

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	if (rq_data_dir(req) == READ) {
		brq->cmd.opcode = readcmd;
		brq->data.flags |= MMC_DATA_READ;
	} else {
		brq->cmd.opcode = writecmd;
		brq->data.flags |= MMC_DATA_WRITE;
	}

	if (do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);
	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
		(req->cmd_flags & REQ_META) &&
		(rq_data_dir(req) == WRITE) &&
		((brq->data.blocks * brq->data.blksz) >=
		 card->ext_csd.data_tag_unit_size);

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open ended-ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway. This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host. If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly. This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_err_check;

	mmc_queue_bounce_pre(mqrq);
}
static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
			   struct mmc_blk_request *brq, struct request *req,
			   int ret)
{
	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1)
			ret = blk_end_request(req, 0, blocks << 9);
	} else {
		ret = blk_end_request(req, 0, brq->data.bytes_xfered);
	}
	return ret;
}
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
	int ret = 1, disable_multi = 0, retry = 0, type;
	enum mmc_blk_status status;
	struct mmc_queue_req *mq_rq;
	struct request *req = rqc;
	struct mmc_async_req *areq;

	if (!rqc && !mq->mqrq_prev->req)
		return 0;

	do {
		if (rqc) {
			/*
			 * When 4KB native sector is enabled, only 8 blocks
			 * multiple read or write is allowed
			 */
			if ((brq->data.blocks & 0x07) &&
			    (card->ext_csd.data_sector_size == 4096)) {
				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
					req->rq_disk->disk_name);
				goto cmd_abort;
			}
			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			areq = &mq->mqrq_cur->mmc_active;
		} else
			areq = NULL;
		areq = mmc_start_req(card->host, areq, (int *) &status);
		if (!areq)
			return 0;

		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
		brq = &mq_rq->brq;
		req = mq_rq->req;
		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
		mmc_queue_bounce_post(mq_rq);

		switch (status) {
		case MMC_BLK_SUCCESS:
		case MMC_BLK_PARTIAL:
			/*
			 * A block was successfully transferred.
			 */
			mmc_blk_reset_success(md, type);
			ret = blk_end_request(req, 0,
					brq->data.bytes_xfered);
			/*
			 * If the blk_end_request function returns non-zero even
			 * though all data has been transferred and no errors
			 * were returned by the host controller, it's a bug.
			 */
			if (status == MMC_BLK_SUCCESS && ret) {
				pr_err("%s BUG rq_tot %d d_xfer %d\n",
				       __func__, blk_rq_bytes(req),
				       brq->data.bytes_xfered);
				rqc = NULL;
				goto cmd_abort;
			}
			break;
		case MMC_BLK_CMD_ERR:
			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_RETRY:
			if (retry++ < 5)
				break;
			/* Fall through */
		case MMC_BLK_ABORT:
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_DATA_ERR: {
			int err;

			err = mmc_blk_reset(md, card->host, type);
			if (!err)
				break;
			if (err == -ENODEV)
				goto cmd_abort;
			/* Fall through */
		}
		case MMC_BLK_ECC_ERR:
			if (brq->data.blocks > 1) {
				/* Redo read one sector at a time */
				pr_warning("%s: retrying using single block read\n",
					   req->rq_disk->disk_name);
				disable_multi = 1;
				break;
			}
			/*
			 * After an error, we redo I/O one sector at a
			 * time, so we only reach here after trying to
			 * read a single sector.
			 */
			ret = blk_end_request(req, -EIO,
						brq->data.blksz);
			if (!ret)
				goto start_new_req;
			break;
		case MMC_BLK_NOMEDIUM:
			goto cmd_abort;
		}

		if (ret) {
			/*
			 * In case of an incomplete request
			 * prepare it again and resend.
			 */
			mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
			mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
		}
	} while (ret);

	return 1;

 cmd_abort:
	if (mmc_card_removed(card))
		req->cmd_flags |= REQ_QUIET;
	while (ret)
		ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req));

 start_new_req:
	if (rqc) {
		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
		mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
	}

	return 0;
}
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;

	if (req && !mq->mqrq_prev->req)
		/* claim host only for the first request */
		mmc_claim_host(card->host);

	ret = mmc_blk_part_switch(card, md);
	if (ret) {
		if (req)
			blk_end_request_all(req, -EIO);
		ret = 0;
		goto out;
	}

	if (req && req->cmd_flags & REQ_DISCARD) {
		/* complete ongoing async transfer before issuing discard */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		if (req->cmd_flags & REQ_SECURE &&
			!(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
			ret = mmc_blk_issue_secdiscard_rq(mq, req);
		else
			ret = mmc_blk_issue_discard_rq(mq, req);
	} else if (req && req->cmd_flags & REQ_FLUSH) {
		/* complete ongoing async transfer before issuing flush */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		ret = mmc_blk_issue_flush(mq, req);
	} else {
		ret = mmc_blk_issue_rw_rq(mq, req);
	}

out:
	if (!req)
		/* release host only when there are no more requests */
		mmc_release_host(card->host);
	return ret;
}
static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname,
					      int area_type)
{
	struct mmc_blk_data *md;
	int devidx, ret;

	devidx = find_first_zero_bit(dev_use, max_devices);
	if (devidx >= max_devices)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * !subname implies we are creating main mmc_blk_data that will be
	 * associated with mmc_card with mmc_set_drvdata. Due to device
	 * partitions, devidx will not coincide with a per-physical card
	 * index anymore so we keep track of a name index.
	 */
	if (!subname) {
		md->name_idx = find_first_zero_bit(name_use, max_devices);
		__set_bit(md->name_idx, name_use);
	} else
		md->name_idx = ((struct mmc_blk_data *)
				dev_to_disk(parent)->private_data)->name_idx;

	md->area_type = area_type;

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(perdev_minors);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	INIT_LIST_HEAD(&md->part);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
	if (ret)
		goto err_putdisk;

	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%d%s", md->name_idx, subname ? subname : "");
	if (mmc_card_mmc(card))
		blk_queue_logical_block_size(md->queue.queue,
					     card->ext_csd.data_sector_size);
	else
		blk_queue_logical_block_size(md->queue.queue, 512);

	set_capacity(md->disk, size);

	if (mmc_host_cmd23(card->host)) {
		if (mmc_card_mmc(card) ||
		    (mmc_card_sd(card) &&
		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
			md->flags |= MMC_BLK_CMD23;
	}

	if (mmc_card_mmc(card) &&
	    md->flags & MMC_BLK_CMD23 &&
	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
	     card->ext_csd.rel_sectors)) {
		md->flags |= MMC_BLK_REL_WR;
		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
	}

	return md;

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	return ERR_PTR(ret);
}
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	sector_t size;
	struct mmc_blk_data *md;

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		size = card->ext_csd.sectors;
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		size = card->csd.capacity << (card->csd.read_blkbits - 9);
	}
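	/*
	 * Example: a card whose CSD reports read_blkbits = 10 (1 KiB read
	 * blocks) has its capacity shifted left by 10 - 9 = 1 to express
	 * it in the 512-byte sectors that set_capacity() expects.
	 */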
	md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
					MMC_BLK_DATA_AREA_MAIN);
	return md;
}
static int mmc_blk_alloc_part(struct mmc_card *card,
			      struct mmc_blk_data *md,
			      unsigned int part_type,
			      sector_t size,
			      bool default_ro,
			      const char *subname,
			      int area_type)
{
	char cap_str[10];
	struct mmc_blk_data *part_md;

	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
				    subname, area_type);
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	part_md->part_type = part_type;
	list_add(&part_md->part, &md->part);

	string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s partition %u %s\n",
	       part_md->disk->disk_name, mmc_card_id(card),
	       mmc_card_name(card), part_md->part_type, cap_str);
	return 0;
}
/* MMC Physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */

static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int idx, ret = 0;

	if (!mmc_card_mmc(card))
		return 0;

	for (idx = 0; idx < card->nr_parts; idx++) {
		if (card->part[idx].size) {
			ret = mmc_blk_alloc_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].force_ro,
				card->part[idx].name,
				card->part[idx].area_type);
			if (ret)
				return ret;
		}
	}

	return ret;
}
*md
)
1637 struct mmc_card
*card
;
1640 card
= md
->queue
.card
;
1641 if (md
->disk
->flags
& GENHD_FL_UP
) {
1642 device_remove_file(disk_to_dev(md
->disk
), &md
->force_ro
);
1643 if ((md
->area_type
& MMC_BLK_DATA_AREA_BOOT
) &&
1644 card
->ext_csd
.boot_ro_lockable
)
1645 device_remove_file(disk_to_dev(md
->disk
),
1646 &md
->power_ro_lock
);
1648 /* Stop new requests from getting into the queue */
1649 del_gendisk(md
->disk
);
1652 /* Then flush out any already in there */
1653 mmc_cleanup_queue(&md
->queue
);
static void mmc_blk_remove_parts(struct mmc_card *card,
				 struct mmc_blk_data *md)
{
	struct list_head *pos, *q;
	struct mmc_blk_data *part_md;

	__clear_bit(md->name_idx, name_use);
	list_for_each_safe(pos, q, &md->part) {
		part_md = list_entry(pos, struct mmc_blk_data, part);
		list_del(pos);
		mmc_blk_remove_req(part_md);
	}
}
static int mmc_add_disk(struct mmc_blk_data *md)
{
	int ret;
	struct mmc_card *card = md->queue.card;

	add_disk(md->disk);
	md->force_ro.show = force_ro_show;
	md->force_ro.store = force_ro_store;
	sysfs_attr_init(&md->force_ro.attr);
	md->force_ro.attr.name = "force_ro";
	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
	if (ret)
		goto force_ro_fail;

	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
	     card->ext_csd.boot_ro_lockable) {
		umode_t mode;

		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
			mode = S_IRUGO;
		else
			mode = S_IRUGO | S_IWUSR;

		md->power_ro_lock.show = power_ro_lock_show;
		md->power_ro_lock.store = power_ro_lock_store;
		sysfs_attr_init(&md->power_ro_lock.attr);
		md->power_ro_lock.attr.mode = mode;
		md->power_ro_lock.attr.name =
					"ro_lock_until_next_power_on";
		ret = device_create_file(disk_to_dev(md->disk),
				&md->power_ro_lock);
		if (ret)
			goto power_ro_lock_fail;
	}
	return ret;

power_ro_lock_fail:
	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
	del_gendisk(md->disk);

	return ret;
}
#define CID_MANFID_SANDISK	0x2
#define CID_MANFID_TOSHIBA	0x11
#define CID_MANFID_MICRON	0x13
#define CID_MANFID_SAMSUNG	0x15

static const struct mmc_fixup blk_fixups[] =
{
	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),

	/*
	 * Some MMC cards experience performance degradation with CMD23
	 * instead of CMD12-bounded multiblock transfers. For now we'll
	 * black list what's bad...
	 * - Certain Toshiba cards.
	 *
	 * N.B. This doesn't affect SD cards.
	 */
	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),

	/*
	 * Some Micron MMC cards need a longer data read timeout than
	 * indicated in CSD.
	 */
	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
		  MMC_QUIRK_LONG_READ_TIME),

	/*
	 * On these Samsung MoviNAND parts, performing secure erase or
	 * secure trim can result in unrecoverable corruption due to a
	 * firmware bug.
	 */
	MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),

	END_FIXUP
};
static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md, *part_md;
	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");

	if (mmc_blk_alloc_parts(card, md))
		goto out;

	mmc_set_drvdata(card, md);
	mmc_fixup_device(card, blk_fixups);

	if (mmc_add_disk(md))
		goto out;

	list_for_each_entry(part_md, &md->part, part) {
		if (mmc_add_disk(part_md))
			goto out;
	}
	return 0;

 out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	return 0;
}
*card
)
1826 struct mmc_blk_data
*md
= mmc_get_drvdata(card
);
1828 mmc_blk_remove_parts(card
, md
);
1829 mmc_claim_host(card
->host
);
1830 mmc_blk_part_switch(card
, md
);
1831 mmc_release_host(card
->host
);
1832 mmc_blk_remove_req(md
);
1833 mmc_set_drvdata(card
, NULL
);
#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_queue_suspend(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_suspend(&part_md->queue);
		}
	}
	return 0;
}
static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		/*
		 * Resume involves the card going into idle state,
		 * so current partition is always the main one.
		 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
	}
	return 0;
}
#else
#define mmc_blk_suspend	NULL
#define mmc_blk_resume	NULL
#endif
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};
static int __init mmc_blk_init(void)
{
	int res;

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);

	max_devices = 256 / perdev_minors;

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
 out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
	return res;
}
static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}
module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");