/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author: Andrew Christian
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define INAND_CMD38_ARG_EXT_CSD   113
#define INAND_CMD38_ARG_ERASE     0x00
#define INAND_CMD38_ARG_TRIM      0x01
#define INAND_CMD38_ARG_SECERASE  0x80
#define INAND_CMD38_ARG_SECTRIM1  0x81
#define INAND_CMD38_ARG_SECTRIM2  0x88
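
/*
 * Note: the INAND_CMD38_ARG_* values above are written to EXT_CSD byte 113
 * (INAND_CMD38_ARG_EXT_CSD) on cards carrying the MMC_QUIRK_INAND_CMD38
 * quirk, immediately before the matching erase/trim/secure operation; see
 * the quirk handling in the discard and secure-discard paths below.
 */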
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * parameters at load time.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
        spinlock_t      lock;
        struct gendisk  *disk;
        struct mmc_queue queue;
        struct list_head part;

        unsigned int    flags;
#define MMC_BLK_CMD23   (1 << 0)        /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR  (1 << 1)        /* MMC Reliable write support */

        unsigned int    usage;
        unsigned int    read_only;
        unsigned int    part_type;
        unsigned int    name_idx;

        /*
         * Only set in main mmc_blk_data associated
         * with mmc_card with mmc_set_drvdata, and keeps
         * track of the currently selected device partition.
         */
        unsigned int    part_curr;
        struct device_attribute force_ro;
};
static DEFINE_MUTEX(open_lock);

enum mmc_blk_status {
        MMC_BLK_SUCCESS = 0,
        MMC_BLK_PARTIAL,
        MMC_BLK_RETRY,
        MMC_BLK_RETRY_SINGLE,
        MMC_BLK_DATA_ERR,
        MMC_BLK_CMD_ERR,
        MMC_BLK_ABORT,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
        struct mmc_blk_data *md;

        mutex_lock(&open_lock);
        md = disk->private_data;
        if (md && md->usage == 0)
                md = NULL;
        if (md)
                md->usage++;
        mutex_unlock(&open_lock);

        return md;
}
static inline int mmc_get_devidx(struct gendisk *disk)
{
        int devmaj = MAJOR(disk_devt(disk));
        int devidx = MINOR(disk_devt(disk)) / perdev_minors;

        if (!devmaj)
                devidx = disk->first_minor / perdev_minors;
        return devidx;
}
static void mmc_blk_put(struct mmc_blk_data *md)
{
        mutex_lock(&open_lock);
        md->usage--;
        if (md->usage == 0) {
                int devidx = mmc_get_devidx(md->disk);
                blk_cleanup_queue(md->queue.queue);

                __clear_bit(devidx, dev_use);

                put_disk(md->disk);
                kfree(md);
        }
        mutex_unlock(&open_lock);
}
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        int ret;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

        ret = snprintf(buf, PAGE_SIZE, "%d",
                       get_disk_ro(dev_to_disk(dev)) ^
                       md->read_only);
        mmc_blk_put(md);
        return ret;
}
static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        int ret;
        char *end;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        unsigned long set = simple_strtoul(buf, &end, 0);
        if (end == buf) {
                ret = -EINVAL;
                goto out;
        }

        set_disk_ro(dev_to_disk(dev), set || md->read_only);
        ret = count;
out:
        mmc_blk_put(md);
        return ret;
}
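
/*
 * The "force_ro" attribute above layers on top of the card's natural
 * write-protect state: the show callback XORs the gendisk ro flag with
 * md->read_only, and the store callback never clears read-only for a
 * physically protected card (set || md->read_only).
 */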
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
        int ret = -ENXIO;

        mutex_lock(&block_mutex);
        if (md) {
                if (md->usage == 2)
                        check_disk_change(bdev);
                ret = 0;

                if ((mode & FMODE_WRITE) && md->read_only) {
                        mmc_blk_put(md);
                        ret = -EROFS;
                }
        }
        mutex_unlock(&block_mutex);

        return ret;
}
static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
        struct mmc_blk_data *md = disk->private_data;

        mutex_lock(&block_mutex);
        mmc_blk_put(md);
        mutex_unlock(&block_mutex);
        return 0;
}
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
        geo->heads = 4;
        geo->sectors = 16;
        return 0;
}
struct mmc_blk_ioc_data {
        struct mmc_ioc_cmd ic;
        unsigned char *buf;
        u64 buf_bytes;
};
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
        struct mmc_ioc_cmd __user *user)
{
        struct mmc_blk_ioc_data *idata;
        int err;

        idata = kzalloc(sizeof(*idata), GFP_KERNEL);
        if (!idata) {
                err = -ENOMEM;
                goto out;
        }

        if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
                err = -EFAULT;
                goto idata_err;
        }

        idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
        if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
                err = -EOVERFLOW;
                goto idata_err;
        }

        idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
        if (!idata->buf) {
                err = -ENOMEM;
                goto idata_err;
        }

        if (copy_from_user(idata->buf, (void __user *)(unsigned long)
                           idata->ic.data_ptr, idata->buf_bytes)) {
                err = -EFAULT;
                goto copy_err;
        }

        return idata;

copy_err:
        kfree(idata->buf);
idata_err:
        kfree(idata);
out:
        return ERR_PTR(err);
}
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
        struct mmc_ioc_cmd __user *ic_ptr)
        struct mmc_blk_ioc_data *idata;
        struct mmc_blk_data *md;
        struct mmc_card *card;
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        struct mmc_request mrq = {0};
        struct scatterlist sg;

        /*
         * The caller must have CAP_SYS_RAWIO, and must be calling this on the
         * whole block device, not on a partition.  This prevents overspray
         * between sibling partitions.
         */
        if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))

        idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
                return PTR_ERR(idata);

        cmd.opcode = idata->ic.opcode;
        cmd.arg = idata->ic.arg;
        cmd.flags = idata->ic.flags;

        data.blksz = idata->ic.blksz;
        data.blocks = idata->ic.blocks;

        sg_init_one(data.sg, idata->buf, idata->buf_bytes);

        if (idata->ic.write_flag)
                data.flags = MMC_DATA_WRITE;
        else
                data.flags = MMC_DATA_READ;

        md = mmc_blk_get(bdev->bd_disk);

        card = md->queue.card;

        mmc_claim_host(card->host);

        if (idata->ic.is_acmd) {
                err = mmc_app_cmd(card->host, card);

        /* data.flags must already be set before doing this. */
        mmc_set_data_timeout(&data, card);
        /* Allow overriding the timeout_ns for empirical tuning. */
        if (idata->ic.data_timeout_ns)
                data.timeout_ns = idata->ic.data_timeout_ns;

        if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
                /*
                 * Pretend this is a data transfer and rely on the host driver
                 * to compute timeout.  When all host drivers support
                 * cmd.cmd_timeout for R1B, this can be changed to:
                 *
                 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
                 */
                data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;

        mmc_wait_for_req(card->host, &mrq);

                dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
                        __func__, cmd.error);

                dev_err(mmc_dev(card->host), "%s: data error %d\n",
                        __func__, data.error);

        /*
         * According to the SD specs, some commands require a delay after
         * issuing the command.
         */
        if (idata->ic.postsleep_min_us)
                usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

        if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {

        if (!idata->ic.write_flag) {
                if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
                                 idata->buf, idata->buf_bytes)) {

        mmc_release_host(card->host);
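
/*
 * For reference, a minimal user-space sketch of driving the path above via
 * MMC_IOC_CMD on the whole-device node (not part of this driver; the opcode,
 * argument and flag values are illustrative assumptions only):
 *
 *      struct mmc_ioc_cmd ic = {0};
 *      ic.opcode = 13;                 // e.g. SEND_STATUS, hypothetical choice
 *      ic.arg = rca << 16;             // assumed relative card address
 *      ic.flags = ...;                 // response/command type flags
 *      ic.blksz = 0;
 *      ic.blocks = 0;                  // no data phase
 *      ioctl(fd, MMC_IOC_CMD, &ic);    // fd = open("/dev/mmcblk0", O_RDWR)
 *
 * mmc_blk_ioctl_copy_from_user() copies the descriptor (and any buffer
 * referenced by ic.data_ptr) into the kernel, the request is issued above,
 * and the response words are copied back into ic.response.
 */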
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
        unsigned int cmd, unsigned long arg)
{
        int ret = -EINVAL;
        if (cmd == MMC_IOC_CMD)
                ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
        return ret;
}

static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
        unsigned int cmd, unsigned long arg)
{
        return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
static const struct block_device_operations mmc_bdops = {
        .open                   = mmc_blk_open,
        .release                = mmc_blk_release,
        .getgeo                 = mmc_blk_getgeo,
        .owner                  = THIS_MODULE,
        .ioctl                  = mmc_blk_ioctl,
        .compat_ioctl           = mmc_blk_compat_ioctl,
};
static inline int mmc_blk_part_switch(struct mmc_card *card,
                                      struct mmc_blk_data *md)
{
        int ret;
        struct mmc_blk_data *main_md = mmc_get_drvdata(card);
        if (main_md->part_curr == md->part_type)
                return 0;

        if (mmc_card_mmc(card)) {
                card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
                card->ext_csd.part_config |= md->part_type;

                ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
                                 card->ext_csd.part_time);
                if (ret)
                        return ret;
        }

        main_md->part_curr = md->part_type;
        return 0;
}
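
/*
 * Partition switching above works by rewriting the partition access bits of
 * the EXT_CSD PARTITION_CONFIG byte with a CMD6 SWITCH (mmc_switch) and then
 * caching the selection in the main mmc_blk_data (part_curr), so that
 * redundant switches are skipped on later requests.
 */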
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
        struct mmc_request mrq = {0};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        unsigned int timeout_us;

        struct scatterlist sg;

        cmd.opcode = MMC_APP_CMD;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, 0);

        if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.timeout_ns = card->csd.tacc_ns * 100;
        data.timeout_clks = card->csd.tacc_clks * 100;

        timeout_us = data.timeout_ns / 1000;
        timeout_us += data.timeout_clks * 1000 /
                (card->host->ios.clock / 1000);

        if (timeout_us > 100000) {
                data.timeout_ns = 100000000;
                data.timeout_clks = 0;

        data.flags = MMC_DATA_READ;

        blocks = kmalloc(4, GFP_KERNEL);

        sg_init_one(&sg, blocks, 4);

        mmc_wait_for_req(card->host, &mrq);

        result = ntohl(*blocks);

        if (cmd.error || data.error)
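
/*
 * mmc_sd_num_wr_blocks() above uses ACMD22 (SD_APP_SEND_NUM_WR_BLKS) to ask
 * an SD card how many blocks it actually wrote; the write-error path in
 * mmc_blk_issue_rw_rq() uses the answer to acknowledge the known-good
 * sectors before failing the remainder of the request.
 */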
static int send_stop(struct mmc_card *card, u32 *status)
{
        struct mmc_command cmd = {0};
        int err;

        cmd.opcode = MMC_STOP_TRANSMISSION;
        cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, 5);
        if (err == 0)
                *status = cmd.resp[0];
        return err;
}
static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
        struct mmc_command cmd = {0};
        int err;

        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, retries);
        if (err == 0)
                *status = cmd.resp[0];
        return err;
}
#define ERR_CONTINUE    0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
        bool status_valid, u32 status)

                /* response crc error, retry the r/w cmd */
                pr_err("%s: %s sending %s command, card status %#x\n",
                        req->rq_disk->disk_name, "response CRC error",
                        name, status);

                pr_err("%s: %s sending %s command, card status %#x\n",
                        req->rq_disk->disk_name, "timed out", name, status);

                /* If the status cmd initially failed, retry the r/w cmd */

                /*
                 * If it was a r/w cmd crc error, or illegal command
                 * (eg, issued in wrong state) then retry - we should
                 * have corrected the state problem above.
                 */
                if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))

                /* Otherwise abort the command */

                /* We don't understand the error code the driver gave us */
                pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
                       req->rq_disk->disk_name, error, status);
/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
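
/*
 * The recovery helpers return ERR_CONTINUE (defined above) when the failure
 * can be ignored for the purposes of the transfer; the other outcomes are
 * translated into MMC_BLK_RETRY or MMC_BLK_ABORT by mmc_blk_err_check().
 */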
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
        struct mmc_blk_request *brq)
        bool prev_cmd_status_valid = true;
        u32 status, stop_status = 0;

        /*
         * Try to get card status which indicates both the card state
         * and why there was no response.  If the first attempt fails,
         * we can't be sure the returned status is for the r/w command.
         */
        for (retry = 2; retry >= 0; retry--) {
                err = get_card_status(card, &status, 0);

                prev_cmd_status_valid = false;
                pr_err("%s: error %d sending status command, %sing\n",
                       req->rq_disk->disk_name, err, retry ? "retry" : "abort");

        /* We couldn't get a response from the card.  Give up. */

        /*
         * Check the current card state.  If it is in some data transfer
         * mode, tell it to stop (and hopefully transition back to TRAN.)
         */
        if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
            R1_CURRENT_STATE(status) == R1_STATE_RCV) {
                err = send_stop(card, &stop_status);

                        pr_err("%s: error %d sending stop command\n",
                               req->rq_disk->disk_name, err);

                /*
                 * If the stop cmd also timed out, the card is probably
                 * not present, so abort.  Other errors are bad news too.
                 */

        /* Check for set block count errors */
                return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
                                prev_cmd_status_valid, status);

        /* Check for r/w command errors */
                return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
                                prev_cmd_status_valid, status);

        /* Now for stop errors.  These aren't fatal to the transfer. */
        pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
               req->rq_disk->disk_name, brq->stop.error,
               brq->cmd.resp[0], status);

        /*
         * Substitute in our own stop status as this will give the error
         * state which happened during the execution of the r/w command.
         */
        brq->stop.resp[0] = stop_status;
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;

        if (!mmc_can_erase(card)) {

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_trim(card))

        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 INAND_CMD38_ARG_EXT_CSD,
                                 arg == MMC_TRIM_ARG ?
                                 INAND_CMD38_ARG_TRIM :
                                 INAND_CMD38_ARG_ERASE,

        err = mmc_erase(card, from, nr, arg);

        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
                                       struct request *req)
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;

        if (!mmc_can_secure_erase_trim(card)) {

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
                arg = MMC_SECURE_TRIM1_ARG;
        else
                arg = MMC_SECURE_ERASE_ARG;

        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 INAND_CMD38_ARG_EXT_CSD,
                                 arg == MMC_SECURE_TRIM1_ARG ?
                                 INAND_CMD38_ARG_SECTRIM1 :
                                 INAND_CMD38_ARG_SECERASE,

        err = mmc_erase(card, from, nr, arg);
        if (!err && arg == MMC_SECURE_TRIM1_ARG) {
                if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         INAND_CMD38_ARG_EXT_CSD,
                                         INAND_CMD38_ARG_SECTRIM2,

                err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);

        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;

        /*
         * No-op, only service this because we need REQ_FUA for reliable
         * writes.
         */
        spin_lock_irq(&md->lock);
        __blk_end_request_all(req, 0);
        spin_unlock_irq(&md->lock);

        return 0;
}
/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
                                    struct mmc_card *card,
                                    struct request *req)
{
        if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
                /* Legacy mode imposes restrictions on transfers. */
                if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
                        brq->data.blocks = 1;

                if (brq->data.blocks > card->ext_csd.rel_sectors)
                        brq->data.blocks = card->ext_csd.rel_sectors;
                else if (brq->data.blocks < card->ext_csd.rel_sectors)
                        brq->data.blocks = 1;
        }
}
#define CMD_ERRORS                                                      \
        (R1_OUT_OF_RANGE |      /* Command argument out of range */     \
         R1_ADDRESS_ERROR |     /* Misaligned address */                \
         R1_BLOCK_LEN_ERROR |   /* Transferred block length incorrect */\
         R1_WP_VIOLATION |      /* Tried to write to protected block */ \
         R1_CC_ERROR |          /* Card controller error */             \
         R1_ERROR)              /* General/unknown error */
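
/*
 * CMD_ERRORS collects the R1 card-status error bits that mmc_blk_err_check()
 * tests in brq->cmd.resp[0]; any of them means the r/w command itself was
 * rejected before data was transferred.
 */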
static int mmc_blk_err_check(struct mmc_card *card,
                             struct mmc_async_req *areq)
        enum mmc_blk_status ret = MMC_BLK_SUCCESS;
        struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
                                                    mmc_active);
        struct mmc_blk_request *brq = &mq_mrq->brq;
        struct request *req = mq_mrq->req;

        /*
         * sbc.error indicates a problem with the set block count
         * command.  No data will have been transferred.
         *
         * cmd.error indicates a problem with the r/w command.  No
         * data will have been transferred.
         *
         * stop.error indicates a problem with the stop command.  Data
         * may have been transferred, or may still be transferring.
         */
        if (brq->sbc.error || brq->cmd.error || brq->stop.error) {
                switch (mmc_blk_cmd_recovery(card, req, brq)) {
                        return MMC_BLK_RETRY;
                        return MMC_BLK_ABORT;

        /*
         * Check for errors relating to the execution of the
         * initial command - such as address errors.  No data
         * has been transferred.
         */
        if (brq->cmd.resp[0] & CMD_ERRORS) {
                pr_err("%s: r/w command failed, status = %#x\n",
                       req->rq_disk->disk_name, brq->cmd.resp[0]);
                return MMC_BLK_ABORT;
        }

        /*
         * Everything else is either success, or a data error of some
         * kind.  If it was a write, we may have transitioned to
         * program mode, which we have to wait for to complete.
         */
        if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {

                int err = get_card_status(card, &status, 5);

                        printk(KERN_ERR "%s: error %d requesting status\n",
                               req->rq_disk->disk_name, err);
                        return MMC_BLK_CMD_ERR;

                /*
                 * Some cards mishandle the status bits,
                 * so make sure to check both the busy
                 * indication and the card state.
                 */
                } while (!(status & R1_READY_FOR_DATA) ||
                         (R1_CURRENT_STATE(status) == R1_STATE_PRG));

        if (brq->data.error) {
                pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
                       req->rq_disk->disk_name, brq->data.error,
                       (unsigned)blk_rq_pos(req),
                       (unsigned)blk_rq_sectors(req),
                       brq->cmd.resp[0], brq->stop.resp[0]);

                if (rq_data_dir(req) == READ) {
                        if (brq->data.blocks > 1) {
                                /* Redo read one sector at a time */
                                pr_warning("%s: retrying using single block read\n",
                                           req->rq_disk->disk_name);
                                return MMC_BLK_RETRY_SINGLE;

                        return MMC_BLK_DATA_ERR;

                return MMC_BLK_CMD_ERR;

        if (ret == MMC_BLK_SUCCESS &&
            blk_rq_bytes(req) != brq->data.bytes_xfered)
                ret = MMC_BLK_PARTIAL;
*mqrq
,
917 struct mmc_card
*card
,
919 struct mmc_queue
*mq
)
921 u32 readcmd
, writecmd
;
922 struct mmc_blk_request
*brq
= &mqrq
->brq
;
923 struct request
*req
= mqrq
->req
;
924 struct mmc_blk_data
*md
= mq
->data
;
927 * Reliable writes are used to implement Forced Unit Access and
928 * REQ_META accesses, and are supported only on MMCs.
930 * XXX: this really needs a good explanation of why REQ_META
931 * is treated special.
933 bool do_rel_wr
= ((req
->cmd_flags
& REQ_FUA
) ||
934 (req
->cmd_flags
& REQ_META
)) &&
935 (rq_data_dir(req
) == WRITE
) &&
936 (md
->flags
& MMC_BLK_REL_WR
);
938 memset(brq
, 0, sizeof(struct mmc_blk_request
));
939 brq
->mrq
.cmd
= &brq
->cmd
;
940 brq
->mrq
.data
= &brq
->data
;
942 brq
->cmd
.arg
= blk_rq_pos(req
);
943 if (!mmc_card_blockaddr(card
))
945 brq
->cmd
.flags
= MMC_RSP_SPI_R1
| MMC_RSP_R1
| MMC_CMD_ADTC
;
946 brq
->data
.blksz
= 512;
947 brq
->stop
.opcode
= MMC_STOP_TRANSMISSION
;
949 brq
->stop
.flags
= MMC_RSP_SPI_R1B
| MMC_RSP_R1B
| MMC_CMD_AC
;
950 brq
->data
.blocks
= blk_rq_sectors(req
);
953 * The block layer doesn't support all sector count
954 * restrictions, so we need to be prepared for too big
957 if (brq
->data
.blocks
> card
->host
->max_blk_count
)
958 brq
->data
.blocks
= card
->host
->max_blk_count
;
961 * After a read error, we redo the request one sector at a time
962 * in order to accurately determine which sectors can be read
965 if (disable_multi
&& brq
->data
.blocks
> 1)
966 brq
->data
.blocks
= 1;
968 if (brq
->data
.blocks
> 1 || do_rel_wr
) {
969 /* SPI multiblock writes terminate using a special
970 * token, not a STOP_TRANSMISSION request.
972 if (!mmc_host_is_spi(card
->host
) ||
973 rq_data_dir(req
) == READ
)
974 brq
->mrq
.stop
= &brq
->stop
;
975 readcmd
= MMC_READ_MULTIPLE_BLOCK
;
976 writecmd
= MMC_WRITE_MULTIPLE_BLOCK
;
978 brq
->mrq
.stop
= NULL
;
979 readcmd
= MMC_READ_SINGLE_BLOCK
;
980 writecmd
= MMC_WRITE_BLOCK
;
982 if (rq_data_dir(req
) == READ
) {
983 brq
->cmd
.opcode
= readcmd
;
984 brq
->data
.flags
|= MMC_DATA_READ
;
986 brq
->cmd
.opcode
= writecmd
;
987 brq
->data
.flags
|= MMC_DATA_WRITE
;
991 mmc_apply_rel_rw(brq
, card
, req
);
994 * Pre-defined multi-block transfers are preferable to
995 * open ended-ones (and necessary for reliable writes).
996 * However, it is not sufficient to just send CMD23,
997 * and avoid the final CMD12, as on an error condition
998 * CMD12 (stop) needs to be sent anyway. This, coupled
999 * with Auto-CMD23 enhancements provided by some
1000 * hosts, means that the complexity of dealing
1001 * with this is best left to the host. If CMD23 is
1002 * supported by card and host, we'll fill sbc in and let
1003 * the host deal with handling it correctly. This means
1004 * that for hosts that don't expose MMC_CAP_CMD23, no
1005 * change of behavior will be observed.
1007 * N.B: Some MMC cards experience perf degradation.
1008 * We'll avoid using CMD23-bounded multiblock writes for
1009 * these, while retaining features like reliable writes.
1012 if ((md
->flags
& MMC_BLK_CMD23
) &&
1013 mmc_op_multi(brq
->cmd
.opcode
) &&
1014 (do_rel_wr
|| !(card
->quirks
& MMC_QUIRK_BLK_NO_CMD23
))) {
1015 brq
->sbc
.opcode
= MMC_SET_BLOCK_COUNT
;
1016 brq
->sbc
.arg
= brq
->data
.blocks
|
1017 (do_rel_wr
? (1 << 31) : 0);
1018 brq
->sbc
.flags
= MMC_RSP_R1
| MMC_CMD_AC
;
1019 brq
->mrq
.sbc
= &brq
->sbc
;
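
        /*
         * Bit 31 of the SET_BLOCK_COUNT argument (set above via do_rel_wr) is
         * the reliable write request flag, so CMD23-based transfers also ask
         * the card for a reliable write when one was requested.
         */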
        mmc_set_data_timeout(&brq->data, card);

        brq->data.sg = mqrq->sg;
        brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

        /*
         * Adjust the sg list so it is the same size as the
         * request.
         */
        if (brq->data.blocks != blk_rq_sectors(req)) {
                int i, data_size = brq->data.blocks << 9;
                struct scatterlist *sg;

                for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
                        data_size -= sg->length;
                        if (data_size <= 0) {
                                sg->length += data_size;

                brq->data.sg_len = i;

        mqrq->mmc_active.mrq = &brq->mrq;
        mqrq->mmc_active.err_check = mmc_blk_err_check;

        mmc_queue_bounce_pre(mqrq);
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
        int ret = 1, disable_multi = 0, retry = 0;
        enum mmc_blk_status status;
        struct mmc_queue_req *mq_rq;
        struct request *req;
        struct mmc_async_req *areq;

        if (!rqc && !mq->mqrq_prev->req)

                mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
                areq = &mq->mqrq_cur->mmc_active;

                areq = mmc_start_req(card->host, areq, (int *) &status);

                mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);

                mmc_queue_bounce_post(mq_rq);

                case MMC_BLK_SUCCESS:
                case MMC_BLK_PARTIAL:
                        /*
                         * A block was successfully transferred.
                         */
                        spin_lock_irq(&md->lock);
                        ret = __blk_end_request(req, 0,
                                                brq->data.bytes_xfered);
                        spin_unlock_irq(&md->lock);
                        if (status == MMC_BLK_SUCCESS && ret) {
                                /*
                                 * The blk_end_request has returned non-zero
                                 * even though all data was transferred and no
                                 * errors were returned by the host.
                                 * If this happens it's a bug.
                                 */
                                printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n",
                                       __func__, blk_rq_bytes(req),
                                       brq->data.bytes_xfered);

                case MMC_BLK_CMD_ERR:

                case MMC_BLK_RETRY_SINGLE:

                case MMC_BLK_DATA_ERR:
                        /*
                         * After an error, we redo I/O one sector at a
                         * time, so we only reach here after trying to
                         * read a single sector.
                         */
                        spin_lock_irq(&md->lock);
                        ret = __blk_end_request(req, -EIO,
                                                brq->data.blksz);
                        spin_unlock_irq(&md->lock);

                        /*
                         * In case of an incomplete request,
                         * prepare it again and resend.
                         */
                        mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
                        mmc_start_req(card->host, &mq_rq->mmc_active, NULL);

        /*
         * If this is an SD card and we're writing, we can first
         * mark the known good sectors as ok.
         *
         * If the card is not SD, we can still ok written sectors
         * as reported by the controller (which might be less than
         * the real number of written sectors, but never more).
         */
        if (mmc_card_sd(card)) {

                blocks = mmc_sd_num_wr_blocks(card);
                if (blocks != (u32)-1) {
                        spin_lock_irq(&md->lock);
                        ret = __blk_end_request(req, 0, blocks << 9);
                        spin_unlock_irq(&md->lock);

                spin_lock_irq(&md->lock);
                ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
                spin_unlock_irq(&md->lock);

        spin_lock_irq(&md->lock);

        ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
        spin_unlock_irq(&md->lock);

        mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
        mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;

        if (req && !mq->mqrq_prev->req)
                /* claim host only for the first request */
                mmc_claim_host(card->host);

        ret = mmc_blk_part_switch(card, md);

        if (req && req->cmd_flags & REQ_DISCARD) {
                /* complete ongoing async transfer before issuing discard */
                if (card->host->areq)
                        mmc_blk_issue_rw_rq(mq, NULL);
                if (req->cmd_flags & REQ_SECURE)
                        ret = mmc_blk_issue_secdiscard_rq(mq, req);
                else
                        ret = mmc_blk_issue_discard_rq(mq, req);
        } else if (req && req->cmd_flags & REQ_FLUSH) {
                /* complete ongoing async transfer before issuing flush */
                if (card->host->areq)
                        mmc_blk_issue_rw_rq(mq, NULL);
                ret = mmc_blk_issue_flush(mq, req);
        } else {
                ret = mmc_blk_issue_rw_rq(mq, req);
        }

                /* release host only when there are no more requests */
                mmc_release_host(card->host);

static inline int mmc_blk_readonly(struct mmc_card *card)
        return mmc_card_readonly(card) ||
               !(card->csd.cmdclass & CCC_BLOCK_WRITE);
*mmc_blk_alloc_req(struct mmc_card
*card
,
1228 struct device
*parent
,
1231 const char *subname
)
1233 struct mmc_blk_data
*md
;
1236 devidx
= find_first_zero_bit(dev_use
, max_devices
);
1237 if (devidx
>= max_devices
)
1238 return ERR_PTR(-ENOSPC
);
1239 __set_bit(devidx
, dev_use
);
1241 md
= kzalloc(sizeof(struct mmc_blk_data
), GFP_KERNEL
);
1248 * !subname implies we are creating main mmc_blk_data that will be
1249 * associated with mmc_card with mmc_set_drvdata. Due to device
1250 * partitions, devidx will not coincide with a per-physical card
1251 * index anymore so we keep track of a name index.
1254 md
->name_idx
= find_first_zero_bit(name_use
, max_devices
);
1255 __set_bit(md
->name_idx
, name_use
);
1258 md
->name_idx
= ((struct mmc_blk_data
*)
1259 dev_to_disk(parent
)->private_data
)->name_idx
;
1262 * Set the read-only status based on the supported commands
1263 * and the write protect switch.
1265 md
->read_only
= mmc_blk_readonly(card
);
1267 md
->disk
= alloc_disk(perdev_minors
);
1268 if (md
->disk
== NULL
) {
1273 spin_lock_init(&md
->lock
);
1274 INIT_LIST_HEAD(&md
->part
);
1277 ret
= mmc_init_queue(&md
->queue
, card
, &md
->lock
, subname
);
1281 md
->queue
.issue_fn
= mmc_blk_issue_rq
;
1282 md
->queue
.data
= md
;
1284 md
->disk
->major
= MMC_BLOCK_MAJOR
;
1285 md
->disk
->first_minor
= devidx
* perdev_minors
;
1286 md
->disk
->fops
= &mmc_bdops
;
1287 md
->disk
->private_data
= md
;
1288 md
->disk
->queue
= md
->queue
.queue
;
1289 md
->disk
->driverfs_dev
= parent
;
1290 set_disk_ro(md
->disk
, md
->read_only
|| default_ro
);
1293 * As discussed on lkml, GENHD_FL_REMOVABLE should:
1295 * - be set for removable media with permanent block devices
1296 * - be unset for removable block devices with permanent media
1298 * Since MMC block devices clearly fall under the second
1299 * case, we do not set GENHD_FL_REMOVABLE. Userspace
1300 * should use the block device creation/destruction hotplug
1301 * messages to tell when the card is present.
1304 snprintf(md
->disk
->disk_name
, sizeof(md
->disk
->disk_name
),
1305 "mmcblk%d%s", md
->name_idx
, subname
? subname
: "");
1307 blk_queue_logical_block_size(md
->queue
.queue
, 512);
1308 set_capacity(md
->disk
, size
);
1310 if (mmc_host_cmd23(card
->host
)) {
1311 if (mmc_card_mmc(card
) ||
1312 (mmc_card_sd(card
) &&
1313 card
->scr
.cmds
& SD_SCR_CMD23_SUPPORT
))
1314 md
->flags
|= MMC_BLK_CMD23
;
1317 if (mmc_card_mmc(card
) &&
1318 md
->flags
& MMC_BLK_CMD23
&&
1319 ((card
->ext_csd
.rel_param
& EXT_CSD_WR_REL_PARAM_EN
) ||
1320 card
->ext_csd
.rel_sectors
)) {
1321 md
->flags
|= MMC_BLK_REL_WR
;
1322 blk_queue_flush(md
->queue
.queue
, REQ_FLUSH
| REQ_FUA
);
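
                /*
                 * REQ_FLUSH/REQ_FUA are only advertised when reliable writes
                 * are supported: mmc_blk_issue_flush() completes flushes as
                 * no-ops, and FUA/REQ_META writes are mapped onto reliable
                 * writes in mmc_blk_rw_rq_prep().
                 */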
        return ERR_PTR(ret);
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
        struct mmc_blk_data *md;

        if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
                /*
                 * The EXT_CSD sector count is in number of 512 byte
                 * sectors.
                 */
                size = card->ext_csd.sectors;
        } else {
                /*
                 * The CSD capacity field is in units of read_blkbits.
                 * set_capacity takes units of 512 bytes.
                 */
                size = card->csd.capacity << (card->csd.read_blkbits - 9);
        }

        md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);

static int mmc_blk_alloc_part(struct mmc_card *card,
                              struct mmc_blk_data *md,
                              unsigned int part_type,
                              sector_t size,
                              bool default_ro,
                              const char *subname)
        struct mmc_blk_data *part_md;

        part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
                                    subname);
        if (IS_ERR(part_md))
                return PTR_ERR(part_md);
        part_md->part_type = part_type;
        list_add(&part_md->part, &md->part);

        string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
                        cap_str, sizeof(cap_str));
        printk(KERN_INFO "%s: %s %s partition %u %s\n",
               part_md->disk->disk_name, mmc_card_id(card),
               mmc_card_name(card), part_md->part_type, cap_str);
static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
        if (!mmc_card_mmc(card))

        if (card->ext_csd.boot_size) {
                ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
                                         card->ext_csd.boot_size >> 9,

                ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
                                         card->ext_csd.boot_size >> 9,

static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
        int err;

        mmc_claim_host(card->host);
        err = mmc_set_blocklen(card, 512);
        mmc_release_host(card->host);

        if (err) {
                printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
                       md->disk->disk_name, err);
                return -EINVAL;
        }

        return 0;
}

static void mmc_blk_remove_req(struct mmc_blk_data *md)
        if (md->disk->flags & GENHD_FL_UP) {
                device_remove_file(disk_to_dev(md->disk), &md->force_ro);

                /* Stop new requests from getting into the queue */
                del_gendisk(md->disk);

        /* Then flush out any already in there */
        mmc_cleanup_queue(&md->queue);

static void mmc_blk_remove_parts(struct mmc_card *card,
                                 struct mmc_blk_data *md)
        struct list_head *pos, *q;
        struct mmc_blk_data *part_md;

        __clear_bit(md->name_idx, name_use);
        list_for_each_safe(pos, q, &md->part) {
                part_md = list_entry(pos, struct mmc_blk_data, part);

                mmc_blk_remove_req(part_md);

static int mmc_add_disk(struct mmc_blk_data *md)
        md->force_ro.show = force_ro_show;
        md->force_ro.store = force_ro_store;
        sysfs_attr_init(&md->force_ro.attr);
        md->force_ro.attr.name = "force_ro";
        md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
        ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);

                del_gendisk(md->disk);

static const struct mmc_fixup blk_fixups[] =
        MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),

        /*
         * Some MMC cards experience performance degradation with CMD23
         * instead of CMD12-bounded multiblock transfers.  For now we'll
         * black list what's bad...
         * - Certain Toshiba cards.
         *
         * N.B. This doesn't affect SD cards.
         */
        MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_BLK_NO_CMD23),
        MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_BLK_NO_CMD23),
        MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_BLK_NO_CMD23),
static int mmc_blk_probe(struct mmc_card *card)
        struct mmc_blk_data *md, *part_md;

        /*
         * Check that the card supports the command class(es) we need.
         */
        if (!(card->csd.cmdclass & CCC_BLOCK_READ))

        md = mmc_blk_alloc(card);

        err = mmc_blk_set_blksize(md, card);

        string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
                        cap_str, sizeof(cap_str));
        printk(KERN_INFO "%s: %s %s %s %s\n",
               md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
               cap_str, md->read_only ? "(ro)" : "");

        if (mmc_blk_alloc_parts(card, md))

        mmc_set_drvdata(card, md);
        mmc_fixup_device(card, blk_fixups);

        if (mmc_add_disk(md))

        list_for_each_entry(part_md, &md->part, part) {
                if (mmc_add_disk(part_md))

        mmc_blk_remove_parts(card, md);
        mmc_blk_remove_req(md);

static void mmc_blk_remove(struct mmc_card *card)
{
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        mmc_blk_remove_parts(card, md);
        mmc_claim_host(card->host);
        mmc_blk_part_switch(card, md);
        mmc_release_host(card->host);
        mmc_blk_remove_req(md);
        mmc_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
        struct mmc_blk_data *part_md;
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                mmc_queue_suspend(&md->queue);
                list_for_each_entry(part_md, &md->part, part) {
                        mmc_queue_suspend(&part_md->queue);
                }
        }
        return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
        struct mmc_blk_data *part_md;
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                mmc_blk_set_blksize(md, card);

                /*
                 * Resume involves the card going into idle state,
                 * so current partition is always the main one.
                 */
                md->part_curr = md->part_type;
                mmc_queue_resume(&md->queue);
                list_for_each_entry(part_md, &md->part, part) {
                        mmc_queue_resume(&part_md->queue);
                }
        }
        return 0;
}
#else
#define mmc_blk_suspend NULL
#define mmc_blk_resume  NULL
#endif

static struct mmc_driver mmc_driver = {
        .probe          = mmc_blk_probe,
        .remove         = mmc_blk_remove,
        .suspend        = mmc_blk_suspend,
        .resume         = mmc_blk_resume,
};

static int __init mmc_blk_init(void)
{
        int res;

        if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
                pr_info("mmcblk: using %d minors per device\n", perdev_minors);

        max_devices = 256 / perdev_minors;

        res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
        if (res)
                goto out;

        res = mmc_register_driver(&mmc_driver);
        if (res)
                goto out2;

        return 0;
 out2:
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
        return res;
}

static void __exit mmc_blk_exit(void)
{
        mmc_unregister_driver(&mmc_driver);
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");