/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author: Andrew Christian
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <linux/uaccess.h>
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)

#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
				 (rq_data_dir(req) == WRITE))
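/*
 * Worked example (illustrative, values chosen for this comment only):
 * a CMD6 argument of 0x03AF0100 carries the EXT_CSD byte index in bits
 * [23:16], so MMC_EXTRACT_INDEX_FROM_ARG(0x03AF0100) == 0xAF. The ioctl
 * path below uses this to recognise a SANITIZE_START switch command.
 */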
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device. It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256

static DEFINE_IDA(mmc_blk_ida);
static DEFINE_SPINLOCK(mmc_blk_lock);
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct device	*parent;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with dev_set_drvdata, and keeps
	 * track of the currently selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	int	area_type;
};
static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);
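/*
 * mmc_blk_get()/mmc_blk_put() below implement the usage counting for the
 * per-slot data: a disk's private_data stays valid until the last user
 * drops its reference, at which point the queue, disk and mmc_blk_data
 * are torn down.
 */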
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}
static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;

	return devidx;
}
static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);

		blk_cleanup_queue(md->queue.queue);

		spin_lock(&mmc_blk_lock);
		ida_remove(&mmc_blk_ida, devidx);
		spin_unlock(&mmc_blk_lock);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}
static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	mmc_blk_put(md);

	return ret;
}
static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_card *card;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	card = md->queue.card;

	mmc_get_card(card);

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
			 card->ext_csd.boot_ro_lock |
			 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
			 card->ext_csd.part_time);
	if (ret)
		pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
		       md->disk->disk_name, ret);
	else
		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;

	mmc_put_card(card);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n",
					part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}

	mmc_blk_put(md);
	return count;
}
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d\n",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}
static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);

	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}
static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}
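/*
 * Flash media has no physical CHS geometry; the 4-heads-by-16-sectors
 * layout reported below is a conventional fake so that legacy
 * partitioning tools get consistent numbers.
 */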
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}
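/*
 * The ioctl path marshals one mmc_ioc_cmd (plus its optional data
 * buffer) through the kernel: copy_from_user() into mmc_blk_ioc_data,
 * issue the request, then copy the response (and any read data) back.
 */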
struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes) {
		idata->buf = NULL;
		return idata;
	}

	idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
			   idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}
static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
				      struct mmc_blk_ioc_data *idata)
{
	struct mmc_ioc_cmd *ic = &idata->ic;

	if (copy_to_user(&(ic_ptr->response), ic->response,
			 sizeof(ic->response)))
		return -EFAULT;

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
				 idata->buf, idata->buf_bytes))
			return -EFAULT;
	}

	return 0;
}
static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
				       u32 retries_max)
{
	int err;
	u32 retry_count = 0;

	if (!status || !retries_max)
		return -EINVAL;

	do {
		err = get_card_status(card, status, 5);
		if (err)
			break;

		if (!R1_STATUS(*status) &&
		    (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
			break; /* RPMB programming operation complete */

		/*
		 * Reschedule to give the MMC device a chance to continue
		 * processing the previous command without being polled too
		 * frequently.
		 */
		usleep_range(1000, 5000);
	} while (++retry_count < retries_max);

	if (retry_count == retries_max)
		err = -EPERM;

	return err;
}
static int ioctl_do_sanitize(struct mmc_card *card)
{
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: %s - SANITIZE is not supported\n",
			mmc_hostname(card->host), __func__);
		err = -EOPNOTSUPP;
		goto out;
	}

	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
		 mmc_hostname(card->host), __func__);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_SANITIZE_START, 1,
			 MMC_SANITIZE_REQ_TIMEOUT);

	if (err)
		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
		       mmc_hostname(card->host), __func__, err);

	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
		 __func__);
out:
	return err;
}
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
			       struct mmc_blk_ioc_data *idata)
{
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err;
	int is_rpmb = false;
	u32 status = 0;

	if (!card || !md || !idata)
		return -EINVAL;

	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		is_rpmb = true;

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout. When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *     mrq.data = NULL;
			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	err = mmc_blk_part_switch(card, md);
	if (err)
		return err;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			return err;
	}

	if (is_rpmb) {
		err = mmc_set_blockcount(card, data.blocks,
					 idata->ic.write_flag & (1 << 31));
		if (err)
			return err;
	}

	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
	    (cmd.opcode == MMC_SWITCH)) {
		err = ioctl_do_sanitize(card);

		if (err)
			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
			       __func__, err);

		return err;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
			__func__, cmd.error);
		return cmd.error;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
		return data.error;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));

	if (is_rpmb) {
		/*
		 * Ensure RPMB command has completed by polling CMD13
		 * "Send Status".
		 */
		err = ioctl_rpmb_card_status_poll(card, &status, 5);
		if (err)
			dev_err(mmc_dev(card->host),
				"%s: Card Status=0x%08X, error %d\n",
				__func__, status, err);
	}

	return err;
}
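/*
 * Userspace reaches __mmc_blk_ioctl_cmd() through MMC_IOC_CMD. A minimal
 * sketch of a caller, reading the 512-byte EXT_CSD from an eMMC
 * (illustrative only, not part of this driver; error handling omitted
 * and the device path is an assumption):
 *
 *	struct mmc_ioc_cmd ic = {
 *		.opcode     = MMC_SEND_EXT_CSD,	// CMD8 on eMMC
 *		.flags      = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC,
 *		.blksz      = 512,
 *		.blocks     = 1,
 *		.write_flag = 0,
 *	};
 *	unsigned char buf[512];
 *
 *	mmc_ioc_cmd_set_data(ic, buf);
 *	int fd = open("/dev/mmcblk0", O_RDWR);	// whole device, CAP_SYS_RAWIO
 *	ioctl(fd, MMC_IOC_CMD, &ic);
 */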
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
			     struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	int err = 0, ioc_err = 0;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition. This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	mmc_get_card(card);

	ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);

	/* Always switch back to main area after RPMB access */
	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));

	mmc_put_card(card);

	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	kfree(idata->buf);
	kfree(idata);
	return ioc_err ? ioc_err : err;
}
static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
				   struct mmc_ioc_multi_cmd __user *user)
{
	struct mmc_blk_ioc_data **idata = NULL;
	struct mmc_ioc_cmd __user *cmds = user->cmds;
	struct mmc_card *card;
	struct mmc_blk_data *md;
	int i, err = 0, ioc_err = 0;
	__u64 num_of_cmds;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition. This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
			   sizeof(num_of_cmds)))
		return -EFAULT;

	if (num_of_cmds > MMC_IOC_MAX_CMDS)
		return -EINVAL;

	idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
	if (!idata)
		return -ENOMEM;

	for (i = 0; i < num_of_cmds; i++) {
		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
		if (IS_ERR(idata[i])) {
			err = PTR_ERR(idata[i]);
			num_of_cmds = i;
			goto cmd_err;
		}
	}

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	mmc_get_card(card);

	for (i = 0; i < num_of_cmds && !ioc_err; i++)
		ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);

	/* Always switch back to main area after RPMB access */
	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));

	mmc_put_card(card);

	/* copy to user if data and response */
	for (i = 0; i < num_of_cmds && !err; i++)
		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	for (i = 0; i < num_of_cmds; i++) {
		kfree(idata[i]->buf);
		kfree(idata[i]);
	}
	kfree(idata);
	return ioc_err ? ioc_err : err;
}
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case MMC_IOC_CMD:
		return mmc_blk_ioctl_cmd(bdev,
				(struct mmc_ioc_cmd __user *)arg);
	case MMC_IOC_MULTI_CMD:
		return mmc_blk_ioctl_multi_cmd(bdev,
				(struct mmc_ioc_multi_cmd __user *)arg);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif
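/*
 * struct mmc_ioc_cmd is laid out identically for 32-bit and 64-bit
 * userspace (fixed-width fields with explicit padding), so the compat
 * path above only needs compat_ptr() on the argument.
 */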
static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};
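/*
 * Select the hardware partition named by md->part_type (main area, a
 * boot partition or RPMB) by rewriting the PARTITION_ACCESS bits of
 * EXT_CSD[179]; a no-op when that partition is already current.
 */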
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
			mmc_retune_pause(card->host);

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret) {
			if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
				mmc_retune_unpause(card->host);
			return ret;
		}

		card->ext_csd.part_config = part_config;

		if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
			mmc_retune_unpause(card->host);
	}

	main_md->part_curr = md->part_type;
	return 0;
}
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}
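/*
 * get_card_status() issues CMD13 (SEND_STATUS) and hands back the raw
 * R1 status word, which the recovery helpers below decode with the
 * R1_* accessors.
 */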
static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
		bool hw_busy_detect, struct request *req, bool *gen_err)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	int err = 0;
	u32 status;

	do {
		err = get_card_status(card, &status, 5);
		if (err) {
			pr_err("%s: error %d requesting status\n",
			       req->rq_disk->disk_name, err);
			return err;
		}

		if (status & R1_ERROR) {
			pr_err("%s: %s: error sending status cmd, status %#x\n",
			       req->rq_disk->disk_name, __func__, status);
			*gen_err = true;
		}

		/* We may rely on the host hw to handle busy detection. */
		if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
		    hw_busy_detect)
			break;

		/*
		 * Timeout if the device never becomes ready for data and never
		 * leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s %s\n",
			       mmc_hostname(card->host),
			       req->rq_disk->disk_name, __func__);
			return -ETIMEDOUT;
		}

		/*
		 * Some cards mishandle the status bits,
		 * so make sure to check both the busy
		 * indication and the card state.
		 */
	} while (!(status & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(status) == R1_STATE_PRG));

	return err;
}
static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
		struct request *req, bool *gen_err, u32 *stop_status)
{
	struct mmc_host *host = card->host;
	struct mmc_command cmd = {0};
	int err;
	bool use_r1b_resp = rq_data_dir(req) == WRITE;

	/*
	 * Normally we use R1B responses for WRITE, but in cases where the host
	 * has specified a max_busy_timeout we need to validate it. A failure
	 * means we need to prevent the host from doing hw busy detection, which
	 * is done by converting to a R1 response instead.
	 */
	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	if (use_r1b_resp) {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	}

	err = mmc_wait_for_cmd(host, &cmd, 5);
	if (err)
		return err;

	*stop_status = cmd.resp[0];

	/* No need to check card status in case of READ. */
	if (rq_data_dir(req) == READ)
		return 0;

	if (!mmc_host_is_spi(host) &&
	    (*stop_status & R1_ERROR)) {
		pr_err("%s: %s: general error sending stop command, resp %#x\n",
		       req->rq_disk->disk_name, __func__, *stop_status);
		*gen_err = true;
	}

	return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
}
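/*
 * Outcomes of the error-recovery helpers below: ERR_CONTINUE lets the
 * caller finish normal completion handling, ERR_RETRY re-issues the
 * request, ERR_ABORT gives up on it, and ERR_NOMEDIUM means the card
 * is gone.
 */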
#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
		       req->rq_disk->disk_name, "response CRC error",
		       name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
		       req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid) {
			pr_err("%s: status not valid, retrying timeout\n",
			       req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/*
		 * If it was a r/w cmd crc error, or illegal command
		 * (eg, issued in wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
			pr_err("%s: command error, retrying timeout\n",
			       req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/* Otherwise abort the command */
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}
/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state. Essentially, we do this as follows:
 * - Obtain card status. If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd. If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state. If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode. Try to send it a stop command. If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response. If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		/* Re-tune if needed */
		mmc_retune_recheck(card->host);

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card. Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = true;

	/* Flag General errors */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
		if ((status & R1_ERROR) ||
		    (brq->stop.resp[0] & R1_ERROR)) {
			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0], status);
			*gen_err = true;
		}

	/*
	 * Check the current card state. If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card,
			DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
			req, gen_err, &stop_status);
		if (err) {
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);
			/*
			 * If the stop cmd also timed out, the card is probably
			 * not present, so abort. Other errors are bad news too.
			 */
			return ERR_ABORT;
		}

		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = true;
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
					 prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
					 prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors. These aren't fatal to the transfer. */
	pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
		req->rq_disk->disk_name, brq->stop.error,
		brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	brq->stop.resp[0] = stop_status;
	brq->stop.error = 0;

	return ERR_CONTINUE;
}
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md =
			dev_get_drvdata(&host->card->dev);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}
int mmc_access_rpmb(struct mmc_queue *mq)
{
	struct mmc_blk_data *md = mq->blkdata;

	/*
	 * If this is an RPMB partition access, return true
	 */
	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
		return true;

	return false;
}
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	blk_end_request(req, err, blk_rq_bytes(req));

	return err;
}
*mq
,
1190 struct request
*req
)
1192 struct mmc_blk_data
*md
= mq
->blkdata
;
1193 struct mmc_card
*card
= md
->queue
.card
;
1194 unsigned int from
, nr
, arg
;
1195 int err
= 0, type
= MMC_BLK_SECDISCARD
;
1197 if (!(mmc_can_secure_erase_trim(card
))) {
1202 from
= blk_rq_pos(req
);
1203 nr
= blk_rq_sectors(req
);
1205 if (mmc_can_trim(card
) && !mmc_erase_group_aligned(card
, from
, nr
))
1206 arg
= MMC_SECURE_TRIM1_ARG
;
1208 arg
= MMC_SECURE_ERASE_ARG
;
1211 if (card
->quirks
& MMC_QUIRK_INAND_CMD38
) {
1212 err
= mmc_switch(card
, EXT_CSD_CMD_SET_NORMAL
,
1213 INAND_CMD38_ARG_EXT_CSD
,
1214 arg
== MMC_SECURE_TRIM1_ARG
?
1215 INAND_CMD38_ARG_SECTRIM1
:
1216 INAND_CMD38_ARG_SECERASE
,
1222 err
= mmc_erase(card
, from
, nr
, arg
);
1228 if (arg
== MMC_SECURE_TRIM1_ARG
) {
1229 if (card
->quirks
& MMC_QUIRK_INAND_CMD38
) {
1230 err
= mmc_switch(card
, EXT_CSD_CMD_SET_NORMAL
,
1231 INAND_CMD38_ARG_EXT_CSD
,
1232 INAND_CMD38_ARG_SECTRIM2
,
1238 err
= mmc_erase(card
, from
, nr
, MMC_SECURE_TRIM2_ARG
);
1246 if (err
&& !mmc_blk_reset(md
, card
->host
, type
))
1249 mmc_blk_reset_success(md
, type
);
1251 blk_end_request(req
, err
, blk_rq_bytes(req
));
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	if (ret)
		ret = -EIO;

	blk_end_request_all(req, ret);

	return ret;
}
/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}
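/*
 * Worked example (illustrative): with rel_sectors == 8, a legacy-mode
 * reliable write to a non-8-aligned address is clamped to one block;
 * an aligned 13-block write is clamped to 8 blocks, and the remainder
 * finishes the request in a later partial completion.
 */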
#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */
static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
					     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    mmc_active);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mq_mrq->req;
	int need_retune = card->host->need_retune;
	bool ecc_err = false;
	bool gen_err = false;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command. No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command. No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command. Data
	 * may have been transferred, or may still be transferring.
	 */
	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_NOMEDIUM:
			return MMC_BLK_NOMEDIUM;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors. No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind. If it was a write, we may have transitioned to
	 * program mode, and we have to wait for that to complete.
	 */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		int err;

		/* Check stop command response */
		if (brq->stop.resp[0] & R1_ERROR) {
			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0]);
			gen_err = true;
		}

		err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
				       &gen_err);
		if (err)
			return MMC_BLK_CMD_ERR;
	}

	/* if general error occurs, retry the write operation. */
	if (gen_err) {
		pr_warn("%s: retrying write for general error\n",
			req->rq_disk->disk_name);
		return MMC_BLK_RETRY;
	}

	if (brq->data.error) {
		if (need_retune && !brq->retune_retry_done) {
			pr_debug("%s: retrying because a re-tune was needed\n",
				 req->rq_disk->disk_name);
			brq->retune_retry_done = 1;
			return MMC_BLK_RETRY;
		}
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}

	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;

	return MMC_BLK_SUCCESS;
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct mmc_blk_data *md = mq->blkdata;
	bool do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * are supported only on MMCs.
	 */
	bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
		(rq_data_dir(req) == WRITE) &&
		(md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
	brq->data.blksz = 512;
	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->data.blocks = blk_rq_sectors(req);

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/*
		 * Some controllers have HW issues while operating
		 * in multiple I/O mode
		 */
		if (card->host->ops->multi_io_quirk)
			brq->data.blocks = card->host->ops->multi_io_quirk(card,
						(rq_data_dir(req) == READ) ?
						MMC_DATA_READ : MMC_DATA_WRITE,
						brq->data.blocks);
	}

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	if (rq_data_dir(req) == READ) {
		brq->cmd.opcode = readcmd;
		brq->data.flags = MMC_DATA_READ;
		if (brq->mrq.stop)
			brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
					MMC_CMD_AC;
	} else {
		brq->cmd.opcode = writecmd;
		brq->data.flags = MMC_DATA_WRITE;
		if (brq->mrq.stop)
			brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
					MMC_CMD_AC;
	}

	if (do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);

	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
		(req->cmd_flags & REQ_META) &&
		(rq_data_dir(req) == WRITE) &&
		((brq->data.blocks * brq->data.blksz) >=
		 card->ext_csd.data_tag_unit_size);

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open-ended ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway. This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host. If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly. This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_err_check;

	mmc_queue_bounce_pre(mqrq);
}
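/*
 * Example (illustrative): a reliable, data-tagged 64-block transfer
 * programs brq->sbc.arg = 64 | (1 << 31) | (1 << 29) = 0xA0000040 -
 * bit 31 requests a reliable write and bit 29 tags the data, matching
 * the argument encoding of CMD23 (SET_BLOCK_COUNT).
 */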
static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
			   struct mmc_blk_request *brq, struct request *req,
			   int ret)
{
	struct mmc_queue_req *mq_rq;

	mq_rq = container_of(brq, struct mmc_queue_req, brq);

	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1)
			ret = blk_end_request(req, 0, blocks << 9);
	} else {
		ret = blk_end_request(req, 0, brq->data.bytes_xfered);
	}
	return ret;
}
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq;
	int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
	enum mmc_blk_status status;
	struct mmc_queue_req *mq_rq;
	struct request *req;
	struct mmc_async_req *areq;

	if (!rqc && !mq->mqrq_prev->req)
		return 0;

	do {
		if (rqc) {
			/*
			 * When 4KB native sector is enabled, only 8 blocks
			 * multiple read or write is allowed
			 */
			if (mmc_large_sector(card) &&
			    !IS_ALIGNED(blk_rq_sectors(rqc), 8)) {
				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
				       rqc->rq_disk->disk_name);
				mq_rq = mq->mqrq_cur;
				req = rqc;
				rqc = NULL;
				goto cmd_abort;
			}

			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			areq = &mq->mqrq_cur->mmc_active;
		} else
			areq = NULL;
		areq = mmc_start_req(card->host, areq, &status);
		if (!areq) {
			if (status == MMC_BLK_NEW_REQUEST)
				mq->flags |= MMC_QUEUE_NEW_REQUEST;
			return 0;
		}

		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
		brq = &mq_rq->brq;
		req = mq_rq->req;
		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
		mmc_queue_bounce_post(mq_rq);

		switch (status) {
		case MMC_BLK_SUCCESS:
		case MMC_BLK_PARTIAL:
			/*
			 * A block was successfully transferred.
			 */
			mmc_blk_reset_success(md, type);

			ret = blk_end_request(req, 0,
					      brq->data.bytes_xfered);

			/*
			 * If the blk_end_request function returns non-zero even
			 * though all data has been transferred and no errors
			 * were returned by the host controller, it's a bug.
			 */
			if (status == MMC_BLK_SUCCESS && ret) {
				pr_err("%s BUG rq_tot %d d_xfer %d\n",
				       __func__, blk_rq_bytes(req),
				       brq->data.bytes_xfered);
				rqc = NULL;
				goto cmd_abort;
			}
			break;
		case MMC_BLK_CMD_ERR:
			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
			if (mmc_blk_reset(md, card->host, type))
				goto cmd_abort;
			if (!ret)
				goto start_new_req;
			break;
		case MMC_BLK_RETRY:
			retune_retry_done = brq->retune_retry_done;
			if (retry++ < 5)
				break;
			/* Fall through */
		case MMC_BLK_ABORT:
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_DATA_ERR: {
			int err;

			err = mmc_blk_reset(md, card->host, type);
			if (!err)
				break;
			if (err == -ENODEV)
				goto cmd_abort;
			/* Fall through */
		}
		case MMC_BLK_ECC_ERR:
			if (brq->data.blocks > 1) {
				/* Redo read one sector at a time */
				pr_warn("%s: retrying using single block read\n",
					req->rq_disk->disk_name);
				disable_multi = 1;
				break;
			}
			/*
			 * After an error, we redo I/O one sector at a
			 * time, so we only reach here after trying to
			 * read a single sector.
			 */
			ret = blk_end_request(req, -EIO,
					      brq->data.blksz);
			if (!ret)
				goto start_new_req;
			break;
		case MMC_BLK_NOMEDIUM:
			goto cmd_abort;
		default:
			pr_err("%s: Unhandled return value (%d)",
			       req->rq_disk->disk_name, status);
			goto cmd_abort;
		}

		if (ret) {
			/*
			 * In case of an incomplete request
			 * prepare it again and resend.
			 */
			mmc_blk_rw_rq_prep(mq_rq, card,
					   disable_multi, mq);
			mmc_start_req(card->host,
				      &mq_rq->mmc_active, NULL);
			mq_rq->brq.retune_retry_done = retune_retry_done;
		}
	} while (ret);

	return 1;

cmd_abort:
	if (mmc_card_removed(card))
		req->rq_flags |= RQF_QUIET;
	while (ret)
		ret = blk_end_request(req, -EIO,
				      blk_rq_cur_bytes(req));

start_new_req:
	if (rqc) {
		if (mmc_card_removed(card)) {
			rqc->rq_flags |= RQF_QUIET;
			blk_end_request_all(rqc, -EIO);
		} else {
			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			mmc_start_req(card->host,
				      &mq->mqrq_cur->mmc_active, NULL);
		}
	}

	return 0;
}
*mq
, struct request
*req
)
1759 struct mmc_blk_data
*md
= mq
->blkdata
;
1760 struct mmc_card
*card
= md
->queue
.card
;
1761 bool req_is_special
= mmc_req_is_special(req
);
1763 if (req
&& !mq
->mqrq_prev
->req
)
1764 /* claim host only for the first request */
1767 ret
= mmc_blk_part_switch(card
, md
);
1770 blk_end_request_all(req
, -EIO
);
1776 mq
->flags
&= ~MMC_QUEUE_NEW_REQUEST
;
1777 if (req
&& req_op(req
) == REQ_OP_DISCARD
) {
1778 /* complete ongoing async transfer before issuing discard */
1779 if (card
->host
->areq
)
1780 mmc_blk_issue_rw_rq(mq
, NULL
);
1781 ret
= mmc_blk_issue_discard_rq(mq
, req
);
1782 } else if (req
&& req_op(req
) == REQ_OP_SECURE_ERASE
) {
1783 /* complete ongoing async transfer before issuing secure erase*/
1784 if (card
->host
->areq
)
1785 mmc_blk_issue_rw_rq(mq
, NULL
);
1786 ret
= mmc_blk_issue_secdiscard_rq(mq
, req
);
1787 } else if (req
&& req_op(req
) == REQ_OP_FLUSH
) {
1788 /* complete ongoing async transfer before issuing flush */
1789 if (card
->host
->areq
)
1790 mmc_blk_issue_rw_rq(mq
, NULL
);
1791 ret
= mmc_blk_issue_flush(mq
, req
);
1793 ret
= mmc_blk_issue_rw_rq(mq
, req
);
1797 if ((!req
&& !(mq
->flags
& MMC_QUEUE_NEW_REQUEST
)) || req_is_special
)
1799 * Release host when there are no more requests
1800 * and after special request(discard, flush) is done.
1801 * In case sepecial request, there is no reentry to
1802 * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname,
					      int area_type)
{
	struct mmc_blk_data *md;
	int devidx, ret;

again:
	if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL))
		return ERR_PTR(-ENOMEM);

	spin_lock(&mmc_blk_lock);
	ret = ida_get_new(&mmc_blk_ida, &devidx);
	spin_unlock(&mmc_blk_lock);

	if (ret == -EAGAIN)
		goto again;
	else if (ret)
		return ERR_PTR(ret);

	if (devidx >= max_devices) {
		ret = -ENOSPC;
		goto out;
	}

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	md->area_type = area_type;

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(perdev_minors);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	INIT_LIST_HEAD(&md->part);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
	if (ret)
		goto err_putdisk;

	md->queue.blkdata = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->parent = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);
	md->disk->flags = GENHD_FL_EXT_DEVT;
	if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
		md->disk->flags |= GENHD_FL_NO_PART_SCAN;

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE. Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%u%s", card->host->index, subname ? subname : "");

	if (mmc_card_mmc(card))
		blk_queue_logical_block_size(md->queue.queue,
					     card->ext_csd.data_sector_size);
	else
		blk_queue_logical_block_size(md->queue.queue, 512);

	set_capacity(md->disk, size);

	if (mmc_host_cmd23(card->host)) {
		if ((mmc_card_mmc(card) &&
		     card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
		    (mmc_card_sd(card) &&
		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
			md->flags |= MMC_BLK_CMD23;
	}

	if (mmc_card_mmc(card) &&
	    md->flags & MMC_BLK_CMD23 &&
	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
	     card->ext_csd.rel_sectors)) {
		md->flags |= MMC_BLK_REL_WR;
		blk_queue_write_cache(md->queue.queue, true, true);
	}

	return md;

err_putdisk:
	put_disk(md->disk);
err_kfree:
	kfree(md);
out:
	spin_lock(&mmc_blk_lock);
	ida_remove(&mmc_blk_ida, devidx);
	spin_unlock(&mmc_blk_lock);
	return ERR_PTR(ret);
}
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	sector_t size;

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		size = card->ext_csd.sectors;
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		size = (typeof(sector_t))card->csd.capacity
			<< (card->csd.read_blkbits - 9);
	}

	return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
				 MMC_BLK_DATA_AREA_MAIN);
}
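/*
 * Worked example (illustrative): a CSD reporting capacity == 15160 with
 * read_blkbits == 10 yields 15160 << (10 - 9) = 30320 sectors, which
 * set_capacity() interprets as 512-byte units (~14.8 MiB).
 */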
static int mmc_blk_alloc_part(struct mmc_card *card,
			      struct mmc_blk_data *md,
			      unsigned int part_type,
			      sector_t size,
			      bool default_ro,
			      const char *subname,
			      int area_type)
{
	char cap_str[10];
	struct mmc_blk_data *part_md;

	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
				    subname, area_type);
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	part_md->part_type = part_type;
	list_add(&part_md->part, &md->part);

	string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s partition %u %s\n",
		part_md->disk->disk_name, mmc_card_id(card),
		mmc_card_name(card), part_md->part_type, cap_str);
	return 0;
}
/* MMC Physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */

static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int idx, ret = 0;

	if (!mmc_card_mmc(card))
		return 0;

	for (idx = 0; idx < card->nr_parts; idx++) {
		if (card->part[idx].size) {
			ret = mmc_blk_alloc_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].force_ro,
				card->part[idx].name,
				card->part[idx].area_type);
			if (ret)
				return ret;
		}
	}

	return ret;
}
*md
)
2015 struct mmc_card
*card
;
2019 * Flush remaining requests and free queues. It
2020 * is freeing the queue that stops new requests
2021 * from being accepted.
2023 card
= md
->queue
.card
;
2024 mmc_cleanup_queue(&md
->queue
);
2025 if (md
->disk
->flags
& GENHD_FL_UP
) {
2026 device_remove_file(disk_to_dev(md
->disk
), &md
->force_ro
);
2027 if ((md
->area_type
& MMC_BLK_DATA_AREA_BOOT
) &&
2028 card
->ext_csd
.boot_ro_lockable
)
2029 device_remove_file(disk_to_dev(md
->disk
),
2030 &md
->power_ro_lock
);
2032 del_gendisk(md
->disk
);
2038 static void mmc_blk_remove_parts(struct mmc_card
*card
,
2039 struct mmc_blk_data
*md
)
2041 struct list_head
*pos
, *q
;
2042 struct mmc_blk_data
*part_md
;
2044 list_for_each_safe(pos
, q
, &md
->part
) {
2045 part_md
= list_entry(pos
, struct mmc_blk_data
, part
);
2047 mmc_blk_remove_req(part_md
);
static int mmc_add_disk(struct mmc_blk_data *md)
{
	int ret;
	struct mmc_card *card = md->queue.card;

	device_add_disk(md->parent, md->disk);
	md->force_ro.show = force_ro_show;
	md->force_ro.store = force_ro_store;
	sysfs_attr_init(&md->force_ro.attr);
	md->force_ro.attr.name = "force_ro";
	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
	if (ret)
		goto force_ro_fail;

	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
	    card->ext_csd.boot_ro_lockable) {
		umode_t mode;

		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
			mode = S_IRUGO;
		else
			mode = S_IRUGO | S_IWUSR;

		md->power_ro_lock.show = power_ro_lock_show;
		md->power_ro_lock.store = power_ro_lock_store;
		sysfs_attr_init(&md->power_ro_lock.attr);
		md->power_ro_lock.attr.mode = mode;
		md->power_ro_lock.attr.name =
					"ro_lock_until_next_power_on";
		ret = device_create_file(disk_to_dev(md->disk),
					 &md->power_ro_lock);
		if (ret)
			goto power_ro_lock_fail;
	}
	return ret;

power_ro_lock_fail:
	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
	del_gendisk(md->disk);

	return ret;
}
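/*
 * The attributes registered above surface in sysfs as
 * /sys/block/mmcblkX/force_ro and
 * /sys/block/mmcblkX/ro_lock_until_next_power_on.
 */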
static const struct mmc_fixup blk_fixups[] =
{
	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),

	/*
	 * Some MMC cards experience performance degradation with CMD23
	 * instead of CMD12-bounded multiblock transfers. For now we'll
	 * black list what's bad...
	 * - Certain Toshiba cards.
	 *
	 * N.B. This doesn't affect SD cards.
	 */
	MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),

	/*
	 * Some MMC cards need longer data read timeout than indicated in CSD.
	 */
	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
		  MMC_QUIRK_LONG_READ_TIME),
	MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_LONG_READ_TIME),

	/*
	 * On these Samsung MoviNAND parts, performing secure erase or
	 * secure trim can result in unrecoverable corruption due to a
	 * firmware bug.
	 */
	MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),

	/*
	 * On some Kingston eMMCs, performing trim can result in
	 * unrecoverable data corruption occasionally due to a firmware bug.
	 */
	MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_TRIM_BROKEN),
	MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_TRIM_BROKEN),

	END_FIXUP
};
static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md, *part_md;
	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	mmc_fixup_device(card, blk_fixups);

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");

	if (mmc_blk_alloc_parts(card, md))
		goto out;

	dev_set_drvdata(&card->dev, md);

	if (mmc_add_disk(md))
		goto out;

	list_for_each_entry(part_md, &md->part, part) {
		if (mmc_add_disk(part_md))
			goto out;
	}

	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
	pm_runtime_use_autosuspend(&card->dev);

	/*
	 * Don't enable runtime PM for SD-combo cards here. Leave that
	 * decision to be taken during the SDIO init sequence instead.
	 */
	if (card->type != MMC_TYPE_SD_COMBO) {
		pm_runtime_set_active(&card->dev);
		pm_runtime_enable(&card->dev);
	}

	return 0;

out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	return 0;
}
static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);

	mmc_blk_remove_parts(card, md);
	pm_runtime_get_sync(&card->dev);
	mmc_claim_host(card->host);
	mmc_blk_part_switch(card, md);
	mmc_release_host(card->host);
	if (card->type != MMC_TYPE_SD_COMBO)
		pm_runtime_disable(&card->dev);
	pm_runtime_put_noidle(&card->dev);
	mmc_blk_remove_req(md);
	dev_set_drvdata(&card->dev, NULL);
}
*card
)
2244 struct mmc_blk_data
*part_md
;
2245 struct mmc_blk_data
*md
= dev_get_drvdata(&card
->dev
);
2248 mmc_queue_suspend(&md
->queue
);
2249 list_for_each_entry(part_md
, &md
->part
, part
) {
2250 mmc_queue_suspend(&part_md
->queue
);
2256 static void mmc_blk_shutdown(struct mmc_card
*card
)
2258 _mmc_blk_suspend(card
);
#ifdef CONFIG_PM_SLEEP
static int mmc_blk_suspend(struct device *dev)
{
	struct mmc_card *card = mmc_dev_to_card(dev);

	return _mmc_blk_suspend(card);
}

static int mmc_blk_resume(struct device *dev)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = dev_get_drvdata(dev);

	if (md) {
		/*
		 * Resume involves the card going into idle state,
		 * so current partition is always the main one.
		 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
	}
	return 0;
}
#endif
2291 static struct mmc_driver mmc_driver
= {
2294 .pm
= &mmc_blk_pm_ops
,
2296 .probe
= mmc_blk_probe
,
2297 .remove
= mmc_blk_remove
,
2298 .shutdown
= mmc_blk_shutdown
,
2301 static int __init
mmc_blk_init(void)
2305 if (perdev_minors
!= CONFIG_MMC_BLOCK_MINORS
)
2306 pr_info("mmcblk: using %d minors per device\n", perdev_minors
);
2308 max_devices
= min(MAX_DEVICES
, (1 << MINORBITS
) / perdev_minors
);
2310 res
= register_blkdev(MMC_BLOCK_MAJOR
, "mmc");
2314 res
= mmc_register_driver(&mmc_driver
);
2320 unregister_blkdev(MMC_BLOCK_MAJOR
, "mmc");
static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");