// SPDX-License-Identifier: GPL-2.0
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author: Andrew Christian
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/kref.h>
#include <linux/blkdev.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <linux/debugfs.h>
#include <linux/rpmb.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <linux/uaccess.h>
#include <linux/unaligned.h>
MODULE_ALIAS("mmc:block");

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
/*
 * Set a 10 second timeout for polling write request busy state. Note, mmc core
 * is setting a 3 second timeout for SD cards, and SDHCI has long had a 10
 * second software timer to timeout the whole request, so 10 seconds should be
 * ample.
 */
#define MMC_BLK_TIMEOUT_MS  (10 * 1000)

#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
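/*
 * Worked example of the two helpers above (the concrete argument value is
 * illustrative only): a CMD6/SWITCH argument of 0x03B30100 carries the
 * EXT_CSD byte index in bits [23:16] and the value in bits [15:8], so
 * MMC_EXTRACT_INDEX_FROM_ARG() yields 0xB3 (EXT_CSD_PART_CONFIG, byte 179)
 * and MMC_EXTRACT_VALUE_FROM_ARG() yields 0x01.
 */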
/**
 * struct rpmb_frame - rpmb frame as defined by eMMC 5.1 (JESD84-B51)
 *
 * @stuff        : stuff bytes
 * @key_mac      : The authentication key or the message authentication
 *                 code (MAC) depending on the request/response type.
 *                 The MAC will be delivered in the last (or the only)
 *                 block of data.
 * @data         : Data to be written or read by signed access.
 * @nonce        : Random number generated by the host for the requests
 *                 and copied to the response by the RPMB engine.
 * @write_counter: Counter value for the total amount of the successful
 *                 authenticated data write requests made by the host.
 * @addr         : Address of the data to be programmed to or read
 *                 from the RPMB. Address is the serial number of
 *                 the accessed block (half sector 256B).
 * @block_count  : Number of blocks (half sectors, 256B) requested to be
 *                 read/programmed.
 * @result       : Includes information about the status of the write counter
 *                 (valid, expired) and result of the access made to the RPMB.
 * @req_resp     : Defines the type of request and response to/from the memory.
 *
 * The stuff bytes and big-endian properties are modeled to fit to the spec.
 */
struct rpmb_frame {
	u8     stuff[196];
	u8     key_mac[32];
	u8     data[256];
	u8     nonce[16];
	__be32 write_counter;
	__be16 addr;
	__be16 block_count;
	__be16 result;
	__be16 req_resp;
} __packed;
#define RPMB_PROGRAM_KEY       0x1    /* Program RPMB Authentication Key */
#define RPMB_GET_WRITE_COUNTER 0x2    /* Read RPMB write counter */
#define RPMB_WRITE_DATA        0x3    /* Write data to RPMB partition */
#define RPMB_READ_DATA         0x4    /* Read data from RPMB partition */
#define RPMB_RESULT_READ       0x5    /* Read result request (Internal) */
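/*
 * Rough request flow for the codes above (a sketch based on JESD84-B51, not
 * additional driver logic): the host either programs the authentication key
 * once (RPMB_PROGRAM_KEY), or reads the write counter
 * (RPMB_GET_WRITE_COUNTER) before each authenticated write
 * (RPMB_WRITE_DATA) and fetches the outcome with a result read
 * (RPMB_RESULT_READ). Reads (RPMB_READ_DATA) carry a host nonce that the
 * device copies back so the response can be authenticated.
 */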
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device. It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256

static DEFINE_IDA(mmc_blk_ida);
static DEFINE_IDA(mmc_rpmb_ida);
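/*
 * Worked example of the limit above (numbers illustrative; the real default
 * depends on the kernel configuration): with CONFIG_MMC_BLOCK_MINORS = 8,
 * (1 << 20) / 8 = 131072 possible devices, which is then clamped to
 * MAX_DEVICES (256).
 */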
struct mmc_blk_busy_data {
	struct mmc_card *card;
	u32 status;
};
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	struct device	*parent;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;
	struct list_head rpmbs;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

	struct kref	kref;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)
#define MMC_BLK_CQE_RECOVERY	BIT(4)
#define MMC_BLK_TRIM		BIT(5)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with dev_set_drvdata, and keeps
	 * track of the current selected device partition.
	 */
	unsigned int	part_curr;
#define MMC_BLK_PART_INVALID	UINT_MAX	/* Unknown partition active */
	int	area_type;

	/* debugfs files (only in main mmc_blk_data) */
	struct dentry *status_dentry;
	struct dentry *ext_csd_dentry;
};
/* Device type for RPMB character devices */
static dev_t mmc_rpmb_devt;

/* Bus type for RPMB character devices */
static const struct bus_type mmc_rpmb_bus_type = {
	.name = "mmc_rpmb",
};
/**
 * struct mmc_rpmb_data - special RPMB device type for these areas
 * @dev: the device for the RPMB area
 * @chrdev: character device for the RPMB area
 * @id: unique device ID number
 * @part_index: partition index (0 on first)
 * @md: parent MMC block device
 * @rdev: registered RPMB device
 * @node: list item, so we can put this device on a list
 */
struct mmc_rpmb_data {
	struct device dev;
	struct cdev chrdev;
	int id;
	unsigned int part_index;
	struct mmc_blk_data *md;
	struct rpmb_dev *rdev;
	struct list_head node;
};
static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
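/*
 * perdev_minors can be overridden at boot or module load time, for example
 * with "mmcblk.perdev_minors=16" on the kernel command line (an illustrative
 * value); the 0444 mode above makes the parameter read-only at runtime.
 */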
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      unsigned int part_type);
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int recovery_mode,
			       struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
static int mmc_spi_err_check(struct mmc_card *card);
static int mmc_blk_busy_cb(void *cb_data, bool *busy);
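/*
 * mmc_blk_get() below takes a kref on the per-disk mmc_blk_data and
 * mmc_blk_put() drops it; every successful get must be balanced by a put,
 * and the structure is released from the kref release callback once the
 * last user is gone.
 */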
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && !kref_get_unless_zero(&md->kref))
		md = NULL;
	mutex_unlock(&open_lock);

	return md;
}
static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;

	return devidx;
}
static void mmc_blk_kref_release(struct kref *ref)
{
	struct mmc_blk_data *md = container_of(ref, struct mmc_blk_data, kref);
	int devidx;

	devidx = mmc_get_devidx(md->disk);
	ida_free(&mmc_blk_ida, devidx);

	mutex_lock(&open_lock);
	md->disk->private_data = NULL;
	mutex_unlock(&open_lock);

	put_disk(md->disk);
	kfree(md);
}
static void mmc_blk_put(struct mmc_blk_data *md)
{
	kref_put(&md->kref, mmc_blk_kref_release);
}
static ssize_t power_ro_lock_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = sysfs_emit(buf, "%d\n", locked);

	mmc_blk_put(md);

	return ret;
}
static ssize_t power_ro_lock_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_queue *mq;
	struct request *req;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	mq = &md->queue;

	/* Dispatch locking to the block layer */
	req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req)) {
		count = PTR_ERR(req);
		goto out_put;
	}
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	blk_execute_rq(req, false);
	ret = req_to_mmc_queue_req(req)->drv_op_result;
	blk_mq_free_request(req);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}
out_put:
	mmc_blk_put(md);
	return count;
}
static DEVICE_ATTR(ro_lock_until_next_power_on, 0,
		   power_ro_lock_show, power_ro_lock_store);
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = sysfs_emit(buf, "%d\n",
			 get_disk_ro(dev_to_disk(dev)) ^
			 md->read_only);
	mmc_blk_put(md);

	return ret;
}
static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set;

	if (kstrtoul(buf, 0, &set)) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

static DEVICE_ATTR(force_ro, 0644, force_ro_show, force_ro_store);
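/*
 * Usage sketch (paths shown for illustration only): both attributes appear
 * in the disk's sysfs directory, so e.g.
 * "echo 1 > /sys/block/mmcblkXbootY/force_ro" marks the disk read-only in
 * software, while writing 1 to "ro_lock_until_next_power_on" issues the
 * power-on write protect handled above.
 */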
static struct attribute *mmc_disk_attrs[] = {
	&dev_attr_force_ro.attr,
	&dev_attr_ro_lock_until_next_power_on.attr,
	NULL,
};
static umode_t mmc_disk_attrs_is_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	umode_t mode = a->mode;

	if (a == &dev_attr_ro_lock_until_next_power_on.attr &&
	    (md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
	    md->queue.card->ext_csd.boot_ro_lockable) {
		mode = S_IRUGO;
		if (!(md->queue.card->ext_csd.boot_ro_lock &
		      EXT_CSD_BOOT_WP_B_PWR_WP_DIS))
			mode |= S_IWUSR;
	}

	mmc_blk_put(md);
	return mode;
}
static const struct attribute_group mmc_disk_attr_group = {
	.is_visible	= mmc_disk_attrs_is_visible,
	.attrs		= mmc_disk_attrs,
};

static const struct attribute_group *mmc_disk_attr_groups[] = {
	&mmc_disk_attr_group,
	NULL,
};
static int mmc_blk_open(struct gendisk *disk, blk_mode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		ret = 0;
		if ((mode & BLK_OPEN_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}
static void mmc_blk_release(struct gendisk *disk)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}
struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
	unsigned int flags;
#define MMC_BLK_IOC_DROP	BIT(0)	/* drop this mrq */
#define MMC_BLK_IOC_SBC		BIT(1)	/* use mrq.sbc */

	struct mmc_rpmb_data *rpmb;
};
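/*
 * Sketch of the ioctl data path (descriptive only): user space fills a
 * struct mmc_ioc_cmd, mmc_blk_ioctl_copy_from_user() turns it into a
 * kernel-side mmc_blk_ioc_data (copying the data buffer when present), the
 * command is dispatched through the block layer as a driver-op request, and
 * mmc_blk_ioctl_copy_to_user() copies the response and any read data back.
 */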
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes) {
		idata->buf = NULL;
		return idata;
	}

	idata->buf = memdup_user((void __user *)(unsigned long)
				 idata->ic.data_ptr, idata->buf_bytes);
	if (IS_ERR(idata->buf)) {
		err = PTR_ERR(idata->buf);
		goto idata_err;
	}

	return idata;

idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}
static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
				      struct mmc_blk_ioc_data *idata)
{
	struct mmc_ioc_cmd *ic = &idata->ic;

	if (copy_to_user(&(ic_ptr->response), ic->response,
			 sizeof(ic->response)))
		return -EFAULT;

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
				 idata->buf, idata->buf_bytes))
			return -EFAULT;
	}

	return 0;
}
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
			       struct mmc_blk_ioc_data **idatas, int i)
{
	struct mmc_command cmd = {}, sbc = {};
	struct mmc_data data = {};
	struct mmc_request mrq = {};
	struct scatterlist sg;
	bool r1b_resp;
	unsigned int busy_timeout_ms;
	int err;
	unsigned int target_part;
	struct mmc_blk_ioc_data *idata = idatas[i];
	struct mmc_blk_ioc_data *prev_idata = NULL;

	if (!card || !md || !idata)
		return -EINVAL;

	if (idata->flags & MMC_BLK_IOC_DROP)
		return 0;

	if (idata->flags & MMC_BLK_IOC_SBC && i > 0)
		prev_idata = idatas[i - 1];

	/*
	 * The RPMB accesses come in from the character device, so we
	 * need to target these explicitly. Else we just target the
	 * partition type for the block device the ioctl() was issued
	 * on.
	 */
	if (idata->rpmb) {
		/* Support multiple RPMB partitions */
		target_part = idata->rpmb->part_index;
		target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
	} else {
		target_part = md->part_type;
	}

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	err = mmc_blk_part_switch(card, target_part);
	if (err)
		return err;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			return err;
	}

	if (idata->rpmb || prev_idata) {
		sbc.opcode = MMC_SET_BLOCK_COUNT;
		/*
		 * We don't do any blockcount validation because the max size
		 * may be increased by a future standard. We just copy the
		 * 'Reliable Write' bit here.
		 */
		sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
		if (prev_idata)
			sbc.arg = prev_idata->ic.arg;
		sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		mrq.sbc = &sbc;
	}

	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
	    (cmd.opcode == MMC_SWITCH))
		return mmc_sanitize(card, idata->ic.cmd_timeout_ms);

	/* If it's an R1B response we need some more preparations. */
	busy_timeout_ms = idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS;
	r1b_resp = (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B;
	if (r1b_resp)
		mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout_ms);

	mmc_wait_for_req(card->host, &mrq);
	memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));

	if (prev_idata) {
		memcpy(&prev_idata->ic.response, sbc.resp, sizeof(sbc.resp));
		if (sbc.error) {
			dev_err(mmc_dev(card->host), "%s: sbc error %d\n",
				__func__, sbc.error);
			return sbc.error;
		}
	}

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
			__func__, cmd.error);
		return cmd.error;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
		return data.error;
	}

	/*
	 * Make sure the cache of the PARTITION_CONFIG register and
	 * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write
	 * changed it successfully.
	 */
	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) &&
	    (cmd.opcode == MMC_SWITCH)) {
		struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
		u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg);

		/*
		 * Update cache so the next mmc_blk_part_switch call operates
		 * on up-to-date data.
		 */
		card->ext_csd.part_config = value;
		main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
	}

	/*
	 * Make sure to update CACHE_CTRL in case it was changed. The cache
	 * will get turned back on if the card is re-initialized, e.g.
	 * suspend/resume or hw reset in recovery.
	 */
	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
	    (cmd.opcode == MMC_SWITCH)) {
		u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;

		card->ext_csd.cache_ctrl = value;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	if (mmc_host_is_spi(card->host)) {
		if (idata->ic.write_flag || r1b_resp || cmd.flags & MMC_RSP_SPI_BUSY)
			return mmc_spi_err_check(card);
		return err;
	}

	/*
	 * Ensure RPMB, writes and R1B responses are completed by polling with
	 * CMD13. Note that, usually we don't need to poll when using HW busy
	 * detection, but here it's needed since some commands may indicate the
	 * error through the R1 status bits.
	 */
	if (idata->rpmb || idata->ic.write_flag || r1b_resp) {
		struct mmc_blk_busy_data cb_data = {
			.card = card,
		};

		err = __mmc_poll_for_busy(card->host, 0, busy_timeout_ms,
					  &mmc_blk_busy_cb, &cb_data);

		idata->ic.response[0] = cb_data.status;
	}

	return err;
}
static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
			     struct mmc_ioc_cmd __user *ic_ptr,
			     struct mmc_rpmb_data *rpmb)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_ioc_data *idatas[1];
	struct mmc_queue *mq;
	struct mmc_card *card;
	int err = 0, ioc_err = 0;
	struct request *req;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);
	/* This will be NULL on non-RPMB ioctl():s */
	idata->rpmb = rpmb;

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	/*
	 * Dispatch the ioctl() into the block request queue.
	 */
	mq = &md->queue;
	req = blk_mq_alloc_request(mq->queue,
				   idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto cmd_done;
	}
	idatas[0] = idata;
	req_to_mmc_queue_req(req)->drv_op =
		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	req_to_mmc_queue_req(req)->drv_op_data = idatas;
	req_to_mmc_queue_req(req)->ioc_count = 1;
	blk_execute_rq(req, false);
	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
	blk_mq_free_request(req);

cmd_done:
	kfree(idata->buf);
	kfree(idata);
	return ioc_err ? ioc_err : err;
}
static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
				   struct mmc_ioc_multi_cmd __user *user,
				   struct mmc_rpmb_data *rpmb)
{
	struct mmc_blk_ioc_data **idata = NULL;
	struct mmc_ioc_cmd __user *cmds = user->cmds;
	struct mmc_card *card;
	struct mmc_queue *mq;
	int err = 0, ioc_err = 0;
	__u64 num_of_cmds;
	unsigned int i, n;
	struct request *req;

	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
			   sizeof(num_of_cmds)))
		return -EFAULT;

	if (!num_of_cmds)
		return 0;

	if (num_of_cmds > MMC_IOC_MAX_CMDS)
		return -EINVAL;

	n = num_of_cmds;
	idata = kcalloc(n, sizeof(*idata), GFP_KERNEL);
	if (!idata)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
		if (IS_ERR(idata[i])) {
			err = PTR_ERR(idata[i]);
			n = i;
			goto cmd_err;
		}
		/* This will be NULL on non-RPMB ioctl():s */
		idata[i]->rpmb = rpmb;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_err;
	}

	/*
	 * Dispatch the ioctl()s into the block request queue.
	 */
	mq = &md->queue;
	req = blk_mq_alloc_request(mq->queue,
				   idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto cmd_err;
	}
	req_to_mmc_queue_req(req)->drv_op =
		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	req_to_mmc_queue_req(req)->drv_op_data = idata;
	req_to_mmc_queue_req(req)->ioc_count = n;
	blk_execute_rq(req, false);
	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;

	/* copy to user if data and response */
	for (i = 0; i < n && !err; i++)
		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

	blk_mq_free_request(req);

cmd_err:
	for (i = 0; i < n; i++) {
		kfree(idata[i]->buf);
		kfree(idata[i]);
	}
	kfree(idata);
	return ioc_err ? ioc_err : err;
}
static int mmc_blk_check_blkdev(struct block_device *bdev)
{
	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition. This prevents overspray
	 * between sibling partitions.
	 */
	if (!capable(CAP_SYS_RAWIO) || bdev_is_partition(bdev))
		return -EPERM;
	return 0;
}
static int mmc_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
			 unsigned int cmd, unsigned long arg)
{
	struct mmc_blk_data *md;
	int ret;

	switch (cmd) {
	case MMC_IOC_CMD:
		ret = mmc_blk_check_blkdev(bdev);
		if (ret)
			return ret;
		md = mmc_blk_get(bdev->bd_disk);
		if (!md)
			return -EINVAL;
		ret = mmc_blk_ioctl_cmd(md,
					(struct mmc_ioc_cmd __user *)arg,
					NULL);
		mmc_blk_put(md);
		return ret;
	case MMC_IOC_MULTI_CMD:
		ret = mmc_blk_check_blkdev(bdev);
		if (ret)
			return ret;
		md = mmc_blk_get(bdev->bd_disk);
		if (!md)
			return -EINVAL;
		ret = mmc_blk_ioctl_multi_cmd(md,
					      (struct mmc_ioc_multi_cmd __user *)arg,
					      NULL);
		mmc_blk_put(md);
		return ret;
	default:
		return -EINVAL;
	}
}
static int mmc_blk_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
static int mmc_blk_alternative_gpt_sector(struct gendisk *disk,
					  sector_t *sector)
{
	struct mmc_blk_data *md;
	int ret;

	md = mmc_blk_get(disk);
	if (!md)
		return -EINVAL;

	ret = mmc_card_alternative_gpt_sector(md->queue.card, sector);

	mmc_blk_put(md);

	return ret;
}
= {
910 .open
= mmc_blk_open
,
911 .release
= mmc_blk_release
,
912 .getgeo
= mmc_blk_getgeo
,
913 .owner
= THIS_MODULE
,
914 .ioctl
= mmc_blk_ioctl
,
916 .compat_ioctl
= mmc_blk_compat_ioctl
,
918 .alternative_gpt_sector
= mmc_blk_alternative_gpt_sector
,
921 static int mmc_blk_part_switch_pre(struct mmc_card
*card
,
922 unsigned int part_type
)
924 const unsigned int mask
= EXT_CSD_PART_CONFIG_ACC_MASK
;
925 const unsigned int rpmb
= EXT_CSD_PART_CONFIG_ACC_RPMB
;
928 if ((part_type
& mask
) == rpmb
) {
929 if (card
->ext_csd
.cmdq_en
) {
930 ret
= mmc_cmdq_disable(card
);
934 mmc_retune_pause(card
->host
);
940 static int mmc_blk_part_switch_post(struct mmc_card
*card
,
941 unsigned int part_type
)
943 const unsigned int mask
= EXT_CSD_PART_CONFIG_ACC_MASK
;
944 const unsigned int rpmb
= EXT_CSD_PART_CONFIG_ACC_RPMB
;
947 if ((part_type
& mask
) == rpmb
) {
948 mmc_retune_unpause(card
->host
);
949 if (card
->reenable_cmdq
&& !card
->ext_csd
.cmdq_en
)
950 ret
= mmc_cmdq_enable(card
);
956 static inline int mmc_blk_part_switch(struct mmc_card
*card
,
957 unsigned int part_type
)
960 struct mmc_blk_data
*main_md
= dev_get_drvdata(&card
->dev
);
962 if (main_md
->part_curr
== part_type
)
965 if (mmc_card_mmc(card
)) {
966 u8 part_config
= card
->ext_csd
.part_config
;
968 ret
= mmc_blk_part_switch_pre(card
, part_type
);
972 part_config
&= ~EXT_CSD_PART_CONFIG_ACC_MASK
;
973 part_config
|= part_type
;
975 ret
= mmc_switch(card
, EXT_CSD_CMD_SET_NORMAL
,
976 EXT_CSD_PART_CONFIG
, part_config
,
977 card
->ext_csd
.part_time
);
979 mmc_blk_part_switch_post(card
, part_type
);
983 card
->ext_csd
.part_config
= part_config
;
985 ret
= mmc_blk_part_switch_post(card
, main_md
->part_curr
);
988 main_md
->part_curr
= part_type
;
992 static int mmc_sd_num_wr_blocks(struct mmc_card
*card
, u32
*written_blocks
)
997 u8 resp_sz
= mmc_card_ult_capacity(card
) ? 8 : 4;
998 unsigned int noio_flag
;
1000 struct mmc_request mrq
= {};
1001 struct mmc_command cmd
= {};
1002 struct mmc_data data
= {};
1003 struct scatterlist sg
;
1005 err
= mmc_app_cmd(card
->host
, card
);
1009 cmd
.opcode
= SD_APP_SEND_NUM_WR_BLKS
;
1011 cmd
.flags
= MMC_RSP_SPI_R1
| MMC_RSP_R1
| MMC_CMD_ADTC
;
1013 data
.blksz
= resp_sz
;
1015 data
.flags
= MMC_DATA_READ
;
1018 mmc_set_data_timeout(&data
, card
);
1023 noio_flag
= memalloc_noio_save();
1024 blocks
= kmalloc(resp_sz
, GFP_KERNEL
);
1025 memalloc_noio_restore(noio_flag
);
1029 sg_init_one(&sg
, blocks
, resp_sz
);
1031 mmc_wait_for_req(card
->host
, &mrq
);
1033 if (mmc_card_ult_capacity(card
)) {
1035 * Normally, ACMD22 returns the number of written sectors as
1036 * u32. SDUC, however, returns it as u64. This is not a
1037 * superfluous requirement, because SDUC writes may exceed 2TB.
1038 * For Linux mmc however, the previously write operation could
1039 * not be more than the block layer limits, thus just make room
1040 * for a u64 and cast the response back to u32.
1042 result
= clamp_val(get_unaligned_be64(blocks
), 0, UINT_MAX
);
1044 result
= ntohl(*blocks
);
1048 if (cmd
.error
|| data
.error
)
1051 *written_blocks
= result
;
static unsigned int mmc_blk_clock_khz(struct mmc_host *host)
{
	if (host->actual_clock)
		return host->actual_clock / 1000;

	/* Clock may be subject to a divisor, fudge it by a factor of 2. */
	if (host->ios.clock)
		return host->ios.clock / 2000;

	/* How can there be no clock */
	WARN_ON_ONCE(1);
	return 100; /* 100 kHz is minimum possible value */
}
static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
					    struct mmc_data *data)
{
	unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000);
	unsigned int khz;

	if (data->timeout_clks) {
		khz = mmc_blk_clock_khz(host);
		ms += DIV_ROUND_UP(data->timeout_clks, khz);
	}

	return ms;
}
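/*
 * Worked example for the conversion above (illustrative numbers only): with
 * timeout_ns = 100000000 (100 ms) and timeout_clks = 100000 on a 50000 kHz
 * clock, the result is 100 + DIV_ROUND_UP(100000, 50000) = 102 ms.
 */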
/*
 * Attempts to reset the card and get back to the requested partition.
 * Therefore any error here must result in cancelling the block layer
 * request, it must not be reattempted without going through the mmc_blk
 * partition sanity checks.
 */
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;
	struct mmc_blk_data *main_md = dev_get_drvdata(&host->card->dev);

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host->card);
	/*
	 * A successful reset will leave the card in the main partition, but
	 * upon failure it might not be, so set it to MMC_BLK_PART_INVALID
	 * in that case.
	 */
	main_md->part_curr = err ? MMC_BLK_PART_INVALID : main_md->part_type;
	if (err)
		return err;
	/* Ensure we switch back to the correct partition */
	if (mmc_blk_part_switch(host->card, md->part_type))
		/*
		 * We have failed to get back into the correct
		 * partition, so we need to abort the whole request.
		 */
		return -ENODEV;
	return err;
}
*md
, int type
)
1121 md
->reset_done
&= ~type
;
static void mmc_blk_check_sbc(struct mmc_queue_req *mq_rq)
{
	struct mmc_blk_ioc_data **idata = mq_rq->drv_op_data;
	int i;

	for (i = 1; i < mq_rq->ioc_count; i++) {
		if (idata[i - 1]->ic.opcode == MMC_SET_BLOCK_COUNT &&
		    mmc_op_multi(idata[i]->ic.opcode)) {
			idata[i - 1]->flags |= MMC_BLK_IOC_DROP;
			idata[i]->flags |= MMC_BLK_IOC_SBC;
		}
	}
}
/*
 * The non-block commands come back from the block layer after it queued it and
 * processed it with all other requests and then they get issued in this
 * function.
 */
static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mq_rq;
	struct mmc_card *card = mq->card;
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_blk_ioc_data **idata;
	bool rpmb_ioctl;
	u8 **ext_csd;
	u32 status;
	int ret;
	int i;

	mq_rq = req_to_mmc_queue_req(req);
	rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB);

	switch (mq_rq->drv_op) {
	case MMC_DRV_OP_IOCTL:
		if (card->ext_csd.cmdq_en) {
			ret = mmc_cmdq_disable(card);
			if (ret)
				break;
		}

		mmc_blk_check_sbc(mq_rq);

		fallthrough;
	case MMC_DRV_OP_IOCTL_RPMB:
		idata = mq_rq->drv_op_data;
		for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
			ret = __mmc_blk_ioctl_cmd(card, md, idata, i);
			if (ret)
				break;
		}
		/* Always switch back to main area after RPMB access */
		if (rpmb_ioctl)
			mmc_blk_part_switch(card, 0);
		else if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
			mmc_cmdq_enable(card);
		break;
	case MMC_DRV_OP_BOOT_WP:
		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				 card->ext_csd.boot_ro_lock |
				 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				 card->ext_csd.part_time);
		if (ret)
			pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
			       md->disk->disk_name, ret);
		else
			card->ext_csd.boot_ro_lock |=
				EXT_CSD_BOOT_WP_B_PWR_WP_EN;
		break;
	case MMC_DRV_OP_GET_CARD_STATUS:
		ret = mmc_send_status(card, &status);
		if (!ret)
			ret = status;
		break;
	case MMC_DRV_OP_GET_EXT_CSD:
		ext_csd = mq_rq->drv_op_data;
		ret = mmc_get_ext_csd(card, ext_csd);
		break;
	default:
		pr_err("%s: unknown driver specific operation\n",
		       md->disk->disk_name);
		ret = -EINVAL;
		break;
	}
	mq_rq->drv_op_result = ret;
	blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
*mq
, struct request
*req
,
1214 int type
, unsigned int erase_arg
)
1216 struct mmc_blk_data
*md
= mq
->blkdata
;
1217 struct mmc_card
*card
= md
->queue
.card
;
1221 blk_status_t status
= BLK_STS_OK
;
1223 if (!mmc_can_erase(card
)) {
1224 status
= BLK_STS_NOTSUPP
;
1228 from
= blk_rq_pos(req
);
1229 nr
= blk_rq_sectors(req
);
1233 if (card
->quirks
& MMC_QUIRK_INAND_CMD38
) {
1234 err
= mmc_switch(card
, EXT_CSD_CMD_SET_NORMAL
,
1235 INAND_CMD38_ARG_EXT_CSD
,
1236 erase_arg
== MMC_TRIM_ARG
?
1237 INAND_CMD38_ARG_TRIM
:
1238 INAND_CMD38_ARG_ERASE
,
1239 card
->ext_csd
.generic_cmd6_time
);
1242 err
= mmc_erase(card
, from
, nr
, erase_arg
);
1243 } while (err
== -EIO
&& !mmc_blk_reset(md
, card
->host
, type
));
1245 status
= BLK_STS_IOERR
;
1247 mmc_blk_reset_success(md
, type
);
1249 blk_mq_end_request(req
, status
);
1252 static void mmc_blk_issue_trim_rq(struct mmc_queue
*mq
, struct request
*req
)
1254 mmc_blk_issue_erase_rq(mq
, req
, MMC_BLK_TRIM
, MMC_TRIM_ARG
);
static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int arg = card->erase_arg;

	if (mmc_card_broken_sd_discard(card))
		arg = SD_ERASE_ARG;

	mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, arg);
}
static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
					struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int nr, arg;
	unsigned int from;
	int err = 0, type = MMC_BLK_SECDISCARD;
	blk_status_t status = BLK_STS_OK;

	if (!(mmc_can_secure_erase_trim(card))) {
		status = BLK_STS_NOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 card->ext_csd.generic_cmd6_time);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err) {
		status = BLK_STS_IOERR;
		goto out;
	}

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 card->ext_csd.generic_cmd6_time);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err) {
			status = BLK_STS_IOERR;
			goto out;
		}
	}

out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_mq_end_request(req, status);
}
static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	int ret;

	ret = mmc_flush_cache(card->host);
	blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}
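/*
 * Example of the legacy-mode clamping above (illustrative numbers): with
 * rel_sectors = 8, an aligned 32-sector FUA write is cut down to 8 sectors
 * per transfer, while an unaligned or shorter write is reduced to a single
 * sector; the block layer then re-queues the remainder as partial
 * completions.
 */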
#define CMD_ERRORS_EXCL_OOR						\
	(R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CARD_ECC_FAILED |	/* Card ECC failed */			\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

#define CMD_ERRORS							\
	(CMD_ERRORS_EXCL_OOR |						\
	 R1_OUT_OF_RANGE)	/* Command argument out of range */
static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{
	u32 val;

	/*
	 * Per the SD specification (physical layer version 4.10)[1],
	 * section 4.3.3, it explicitly states that "When the last
	 * block of user area is read using CMD18, the host should
	 * ignore OUT_OF_RANGE error that may occur even the sequence
	 * is correct". And JESD84-B51 for eMMC also has a similar
	 * statement on section 6.8.3.
	 *
	 * Multiple block read/write could be done by either predefined
	 * method, namely CMD23, or open-ending mode. For open-ending mode,
	 * we should ignore the OUT_OF_RANGE error as it's normal behaviour.
	 *
	 * However the spec[1] doesn't tell us whether we should also
	 * ignore that for predefined method. But per the spec[1], section
	 * 4.15 Set Block Count Command, it says "If illegal block count
	 * is set, out of range error will be indicated during read/write
	 * operation (For example, data transfer is stopped at user area
	 * boundary)." In other words, we could expect an out of range error
	 * in the response for the following CMD18/25. And if the argument of
	 * CMD23 + the argument of CMD18/25 exceed the max number of blocks,
	 * we could also expect to get a -ETIMEDOUT or any error number from
	 * the host drivers due to missing data response(for write)/data(for
	 * read), as the cards will stop the data transfer by itself per the
	 * spec. So we only need to check R1_OUT_OF_RANGE for open-ending mode.
	 */
	if (!brq->stop.error) {
		bool oor_with_open_end;
		/* If there is no error yet, check R1 response */

		val = brq->stop.resp[0] & CMD_ERRORS;
		oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;

		if (val && !oor_with_open_end)
			brq->stop.error = -EIO;
	}
}
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
			      int recovery_mode, bool *do_rel_wr_p,
			      bool *do_data_tag_p)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mmc_queue_req_to_req(mqrq);
	bool do_rel_wr, do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * are supported only on MMCs.
	 */
	do_rel_wr = (req->cmd_flags & REQ_FUA) &&
		    rq_data_dir(req) == WRITE &&
		    (md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));

	mmc_crypto_prepare_req(mqrq);

	brq->mrq.data = &brq->data;
	brq->mrq.tag = req->tag;

	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;

	if (rq_data_dir(req) == READ) {
		brq->data.flags = MMC_DATA_READ;
		brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		brq->data.flags = MMC_DATA_WRITE;
		brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	}

	brq->data.blksz = 512;
	brq->data.blocks = blk_rq_sectors(req);
	brq->data.blk_addr = blk_rq_pos(req);

	/*
	 * The command queue supports 2 priorities: "high" (1) and "simple" (0).
	 * The eMMC will give "high" priority tasks priority over "simple"
	 * priority tasks. Here we always set "simple" priority by not setting
	 * it.
	 */

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * Some SD cards in SPI mode return a CRC error or even lock up
		 * completely when trying to read the last block using a
		 * multiblock read command.
		 */
		if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
		    (blk_rq_pos(req) + blk_rq_sectors(req) ==
		     get_capacity(md->disk)))
			brq->data.blocks--;

		/*
		 * After a read error, we redo the request one (native) sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (recovery_mode)
			brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;

		/*
		 * Some controllers have HW issues while operating
		 * in multiple I/O mode
		 */
		if (card->host->ops->multi_io_quirk)
			brq->data.blocks = card->host->ops->multi_io_quirk(card,
						(rq_data_dir(req) == READ) ?
						MMC_DATA_READ : MMC_DATA_WRITE,
						brq->data.blocks);
	}

	if (do_rel_wr) {
		mmc_apply_rel_rw(brq, card, req);
		brq->data.flags |= MMC_DATA_REL_WR;
	}

	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	do_data_tag = card->ext_csd.data_tag_unit_size &&
		      (req->cmd_flags & REQ_META) &&
		      (rq_data_dir(req) == WRITE) &&
		      ((brq->data.blocks * brq->data.blksz) >=
		       card->ext_csd.data_tag_unit_size);

	if (do_data_tag)
		brq->data.flags |= MMC_DATA_DAT_TAG;

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	if (do_rel_wr_p)
		*do_rel_wr_p = do_rel_wr;

	if (do_data_tag_p)
		*do_data_tag_p = do_data_tag;
}
#define MMC_CQE_RETRIES 2

static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct request_queue *q = req->q;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	unsigned long flags;
	bool put_card;
	int err;

	mmc_cqe_post_req(host, mrq);

	if (mrq->cmd && mrq->cmd->error)
		err = mrq->cmd->error;
	else if (mrq->data && mrq->data->error)
		err = mrq->data->error;
	else
		err = 0;

	if (err) {
		if (mqrq->retries++ < MMC_CQE_RETRIES)
			blk_mq_requeue_request(req, true);
		else
			blk_mq_end_request(req, BLK_STS_IOERR);
	} else if (mrq->data) {
		if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
			blk_mq_requeue_request(req, true);
		else
			__blk_mq_end_request(req, BLK_STS_OK);
	} else if (mq->in_recovery) {
		blk_mq_requeue_request(req, true);
	} else {
		blk_mq_end_request(req, BLK_STS_OK);
	}

	spin_lock_irqsave(&mq->lock, flags);

	mq->in_flight[issue_type] -= 1;

	put_card = (mmc_tot_in_flight(mq) == 0);

	mmc_cqe_check_busy(mq);

	spin_unlock_irqrestore(&mq->lock, flags);

	if (!mq->cqe_busy)
		blk_mq_run_hw_queues(q, true);

	if (put_card)
		mmc_put_card(mq->card, &mq->ctx);
}
void mmc_blk_cqe_recovery(struct mmc_queue *mq)
{
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	int err;

	pr_debug("%s: CQE recovery start\n", mmc_hostname(host));

	err = mmc_cqe_recovery(host);
	if (err)
		mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
	mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);

	pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
}
*mrq
)
1634 struct mmc_queue_req
*mqrq
= container_of(mrq
, struct mmc_queue_req
,
1636 struct request
*req
= mmc_queue_req_to_req(mqrq
);
1637 struct request_queue
*q
= req
->q
;
1638 struct mmc_queue
*mq
= q
->queuedata
;
1641 * Block layer timeouts race with completions which means the normal
1642 * completion path cannot be used during recovery.
1644 if (mq
->in_recovery
)
1645 mmc_blk_cqe_complete_rq(mq
, req
);
1646 else if (likely(!blk_should_fake_timeout(req
->q
)))
1647 blk_mq_complete_request(req
);
static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	mrq->done		= mmc_blk_cqe_req_done;
	mrq->recovery_notifier	= mmc_cqe_recovery_notifier;

	return mmc_cqe_start_req(host, mrq);
}
static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq,
						 struct request *req)
{
	struct mmc_blk_request *brq = &mqrq->brq;

	memset(brq, 0, sizeof(*brq));

	brq->mrq.cmd = &brq->cmd;
	brq->mrq.tag = req->tag;

	return &brq->mrq;
}
static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);

	mrq->cmd->opcode = MMC_SWITCH;
	mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
			(EXT_CSD_FLUSH_CACHE << 16) |
			(1 << 8) |
			EXT_CSD_CMD_SET_NORMAL;
	mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B;

	return mmc_blk_cqe_start_req(mq->card->host, mrq);
}
static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_host *host = mq->card->host;
	int err;

	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
	mqrq->brq.mrq.done = mmc_blk_hsq_req_done;
	mmc_pre_req(host, &mqrq->brq.mrq);

	err = mmc_cqe_start_req(host, &mqrq->brq.mrq);
	if (err)
		mmc_post_req(host, &mqrq->brq.mrq, err);

	return err;
}
*mq
, struct request
*req
)
1705 struct mmc_queue_req
*mqrq
= req_to_mmc_queue_req(req
);
1706 struct mmc_host
*host
= mq
->card
->host
;
1708 if (host
->hsq_enabled
)
1709 return mmc_blk_hsq_issue_rw_rq(mq
, req
);
1711 mmc_blk_data_prep(mq
, mqrq
, 0, NULL
, NULL
);
1713 return mmc_blk_cqe_start_req(mq
->card
->host
, &mqrq
->brq
.mrq
);
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int recovery_mode,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct mmc_blk_data *md = mq->blkdata;
	bool do_rel_wr, do_data_tag;

	mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);

	brq->mrq.cmd = &brq->cmd;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open ended-ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway. This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host. If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly. This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	if (mmc_card_ult_capacity(card)) {
		brq->cmd.ext_addr = blk_rq_pos(req) >> 32;
		brq->cmd.has_ext_addr = true;
	}
}
#define MMC_MAX_RETRIES		5
#define MMC_DATA_RETRIES	2
#define MMC_NO_RETRIES		(MMC_MAX_RETRIES + 1)
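/*
 * For orientation (descriptive only): with CMD23 support the request
 * prepared above goes out as SET_BLOCK_COUNT (CMD23) followed by CMD18/CMD25
 * and completes without CMD12, while the open-ended form relies on the
 * STOP_TRANSMISSION command set up in mmc_blk_data_prep(). The retry limits
 * above bound how often mmc_blk_mq_rw_recovery() lets a request be
 * re-issued.
 */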
static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout)
{
	struct mmc_command cmd = {
		.opcode = MMC_STOP_TRANSMISSION,
		.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC,
		/* Some hosts wait for busy anyway, so provide a busy timeout */
		.busy_timeout = timeout,
	};

	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_blk_request *brq = &mqrq->brq;
	unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data);
	int err;

	mmc_retune_hold_now(card->host);

	mmc_blk_send_stop(card, timeout);

	err = mmc_poll_for_busy(card, timeout, false, MMC_BUSY_IO);

	mmc_retune_release(card->host);

	return err;
}
#define MMC_READ_SINGLE_RETRIES	2

/* Single (native) sector read during recovery */
static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	blk_status_t error = BLK_STS_OK;
	size_t bytes_per_read = queue_physical_block_size(mq->queue);

	do {
		u32 status;
		int err;
		int retries = 0;

		while (retries++ <= MMC_READ_SINGLE_RETRIES) {
			mmc_blk_rw_rq_prep(mqrq, card, 1, mq);

			mmc_wait_for_req(host, mrq);

			err = mmc_send_status(card, &status);
			if (err)
				goto error_exit;

			if (!mmc_host_is_spi(host) &&
			    !mmc_ready_for_data(status)) {
				err = mmc_blk_fix_state(card, req);
				if (err)
					goto error_exit;
			}

			if (!mrq->cmd->error)
				break;
		}

		if (mrq->cmd->error ||
		    mrq->data->error ||
		    (!mmc_host_is_spi(host) &&
		     (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS)))
			error = BLK_STS_IOERR;
		else
			error = BLK_STS_OK;

	} while (blk_update_request(req, error, bytes_per_read));

	return;

error_exit:
	mrq->data->bytes_xfered = 0;
	blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
	/* Let it try the remaining request again */
	if (mqrq->retries > MMC_MAX_RETRIES - 1)
		mqrq->retries = MMC_MAX_RETRIES - 1;
}
static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq)
{
	return !!brq->mrq.sbc;
}
static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq)
{
	return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR;
}
/*
 * Check for errors the host controller driver might not have seen such as
 * response mode errors or invalid card state.
 */
static bool mmc_blk_status_error(struct request *req, u32 status)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_blk_request *brq = &mqrq->brq;
	struct mmc_queue *mq = req->q->queuedata;
	u32 stop_err_bits;

	if (mmc_host_is_spi(mq->card->host))
		return false;

	stop_err_bits = mmc_blk_stop_err_bits(brq);

	return brq->cmd.resp[0]  & CMD_ERRORS    ||
	       brq->stop.resp[0] & stop_err_bits ||
	       status            & stop_err_bits ||
	       (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status));
}
static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
{
	return !brq->sbc.error && !brq->cmd.error &&
	       !(brq->cmd.resp[0] & CMD_ERRORS);
}
/*
 * Requests are completed by mmc_blk_mq_complete_rq() which sets simple
 * policy:
 * 1. A request that has transferred at least some data is considered
 *    successful and will be requeued if there is remaining data to
 *    transfer.
 * 2. Otherwise the number of retries is incremented and the request
 *    will be requeued if there are remaining retries.
 * 3. Otherwise the request will be errored out.
 * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and
 * mqrq->retries. So there are only 4 possible actions here:
 * 1. do not accept the bytes_xfered value i.e. set it to zero
 * 2. change mqrq->retries to determine the number of retries
 * 3. try to reset the card
 * 4. read one sector at a time
 */
static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
{
	int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_blk_request *brq = &mqrq->brq;
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = mq->card;
	u32 status;
	u32 blocks;
	int err;

	/*
	 * Some errors the host driver might not have seen. Set the number of
	 * bytes transferred to zero in that case.
	 */
	err = __mmc_send_status(card, &status, 0);
	if (err || mmc_blk_status_error(req, status))
		brq->data.bytes_xfered = 0;

	mmc_retune_release(card->host);

	/*
	 * Try again to get the status. This also provides an opportunity for
	 * the card to recover.
	 */
	if (err)
		err = __mmc_send_status(card, &status, 0);

	/*
	 * Nothing more to do after the number of bytes transferred has been
	 * updated and there is no card.
	 */
	if (err && mmc_detect_card_removed(card->host))
		return;

	/* Try to get back to "tran" state */
	if (!mmc_host_is_spi(mq->card->host) &&
	    (err || !mmc_ready_for_data(status)))
		err = mmc_blk_fix_state(mq->card, req);

	/*
	 * Special case for SD cards where the card might record the number of
	 * blocks written.
	 */
	if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) &&
	    rq_data_dir(req) == WRITE) {
		if (mmc_sd_num_wr_blocks(card, &blocks))
			brq->data.bytes_xfered = 0;
		else
			brq->data.bytes_xfered = blocks << 9;
	}

	/* Reset if the card is in a bad state */
	if (!mmc_host_is_spi(mq->card->host) &&
	    err && mmc_blk_reset(md, card->host, type)) {
		pr_err("%s: recovery failed!\n", req->q->disk->disk_name);
		mqrq->retries = MMC_NO_RETRIES;
		return;
	}

	/*
	 * If anything was done, just return and if there is anything remaining
	 * on the request it will get requeued.
	 */
	if (brq->data.bytes_xfered)
		return;

	/* Reset before last retry */
	if (mqrq->retries + 1 == MMC_MAX_RETRIES &&
	    mmc_blk_reset(md, card->host, type))
		return;

	/* Command errors fail fast, so use all MMC_MAX_RETRIES */
	if (brq->sbc.error || brq->cmd.error)
		return;

	/* Reduce the remaining retries for data errors */
	if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) {
		mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES;
		return;
	}

	if (rq_data_dir(req) == READ && brq->data.blocks >
	    queue_physical_block_size(mq->queue) >> 9) {
		/* Read one (native) sector at a time */
		mmc_blk_read_single(mq, req);
		return;
	}
}
static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
{
	mmc_blk_eval_resp_error(brq);

	return brq->sbc.error || brq->cmd.error || brq->stop.error ||
	       brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
}
static int mmc_spi_err_check(struct mmc_card *card)
{
	u32 status = 0;
	int err;

	/*
	 * SPI does not have a TRAN state we have to wait on, instead the
	 * card is ready again when it no longer holds the line LOW.
	 * We still have to ensure two things here before we know the write
	 * was successful:
	 * 1. The card has not disconnected during busy and we actually read our
	 *    own pull-up, thinking it was still connected, so ensure it
	 *    still responds.
	 * 2. Check for any error bits, in particular R1_SPI_IDLE to catch a
	 *    just reconnected card after being disconnected during busy.
	 */
	err = __mmc_send_status(card, &status, 0);
	if (err)
		return err;
	/* All R1 and R2 bits of SPI are errors in our case */
	if (status)
		return -EIO;
	return 0;
}
static int mmc_blk_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_blk_busy_data *data = cb_data;
	u32 status = 0;
	int err;

	err = mmc_send_status(data->card, &status);
	if (err)
		return err;

	/* Accumulate response error bits. */
	data->status |= status;

	*busy = !mmc_ready_for_data(status);
	return 0;
}
static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_blk_busy_data cb_data;
	int err;

	if (rq_data_dir(req) == READ)
		return 0;

	if (mmc_host_is_spi(card->host)) {
		err = mmc_spi_err_check(card);
		if (err)
			mqrq->brq.data.bytes_xfered = 0;
		return err;
	}

	cb_data.card = card;
	cb_data.status = 0;
	err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS,
				  &mmc_blk_busy_cb, &cb_data);

	/*
	 * Do not assume data transferred correctly if there are any error bits
	 * set.
	 */
	if (cb_data.status & mmc_blk_stop_err_bits(&mqrq->brq)) {
		mqrq->brq.data.bytes_xfered = 0;
		err = err ? err : -EIO;
	}

	/* Copy the exception bit so it will be seen later on */
	if (mmc_card_mmc(card) && cb_data.status & R1_EXCEPTION_EVENT)
		mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT;

	return err;
}
static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq,
					    struct request *req)
{
	int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;

	mmc_blk_reset_success(mq->blkdata, type);
}
static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	unsigned int nr_bytes = mqrq->brq.data.bytes_xfered;

	if (nr_bytes) {
		if (blk_update_request(req, BLK_STS_OK, nr_bytes))
			blk_mq_requeue_request(req, true);
		else
			__blk_mq_end_request(req, BLK_STS_OK);
	} else if (!blk_rq_bytes(req)) {
		__blk_mq_end_request(req, BLK_STS_IOERR);
	} else if (mqrq->retries++ < MMC_MAX_RETRIES) {
		blk_mq_requeue_request(req, true);
	} else {
		if (mmc_card_removed(mq->card))
			req->rq_flags |= RQF_QUIET;
		blk_mq_end_request(req, BLK_STS_IOERR);
	}
}
static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq,
					struct mmc_queue_req *mqrq)
{
	return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) &&
	       (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT ||
		mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT);
}
static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
				 struct mmc_queue_req *mqrq)
{
	if (mmc_blk_urgent_bkops_needed(mq, mqrq))
		mmc_run_bkops(mq->card);
}
static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq =
		container_of(mrq, struct mmc_queue_req, brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_host *host = mq->card->host;
	unsigned long flags;

	if (mmc_blk_rq_error(&mqrq->brq) ||
	    mmc_blk_urgent_bkops_needed(mq, mqrq)) {
		spin_lock_irqsave(&mq->lock, flags);
		mq->recovery_needed = true;
		mq->recovery_req = req;
		spin_unlock_irqrestore(&mq->lock, flags);

		host->cqe_ops->cqe_recovery_start(host);

		schedule_work(&mq->recovery_work);
		return;
	}

	mmc_blk_rw_reset_success(mq, req);

	/*
	 * Block layer timeouts race with completions which means the normal
	 * completion path cannot be used during recovery.
	 */
	if (mq->in_recovery)
		mmc_blk_cqe_complete_rq(mq, req);
	else if (likely(!blk_should_fake_timeout(req->q)))
		blk_mq_complete_request(req);
}
void mmc_blk_mq_complete(struct request *req)
{
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;

	if (host->cqe_enabled)
		mmc_blk_cqe_complete_rq(mq, req);
	else if (likely(!blk_should_fake_timeout(req->q)))
		mmc_blk_mq_complete_rq(mq, req);
}
static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_host *host = mq->card->host;

	if (mmc_blk_rq_error(&mqrq->brq) ||
	    mmc_blk_card_busy(mq->card, req)) {
		mmc_blk_mq_rw_recovery(mq, req);
	} else {
		mmc_blk_rw_reset_success(mq, req);
		mmc_retune_release(host);
	}

	mmc_blk_urgent_bkops(mq, mqrq);
}
static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type)
{
	unsigned long flags;
	bool put_card;

	spin_lock_irqsave(&mq->lock, flags);

	mq->in_flight[issue_type] -= 1;

	put_card = (mmc_tot_in_flight(mq) == 0);

	spin_unlock_irqrestore(&mq->lock, flags);

	if (put_card)
		mmc_put_card(mq->card, &mq->ctx);
}
static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
				bool can_sleep)
{
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_host *host = mq->card->host;

	mmc_post_req(host, mrq, 0);

	/*
	 * Block layer timeouts race with completions which means the normal
	 * completion path cannot be used during recovery.
	 */
	if (mq->in_recovery) {
		mmc_blk_mq_complete_rq(mq, req);
	} else if (likely(!blk_should_fake_timeout(req->q))) {
		if (can_sleep)
			blk_mq_complete_request_direct(req, mmc_blk_mq_complete);
		else
			blk_mq_complete_request(req);
	}

	mmc_blk_mq_dec_in_flight(mq, issue_type);
}
void mmc_blk_mq_recovery(struct mmc_queue *mq)
{
	struct request *req = mq->recovery_req;
	struct mmc_host *host = mq->card->host;
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);

	mq->recovery_req = NULL;
	mq->rw_wait = false;

	if (mmc_blk_rq_error(&mqrq->brq)) {
		mmc_retune_hold_now(host);
		mmc_blk_mq_rw_recovery(mq, req);
	}

	mmc_blk_urgent_bkops(mq, mqrq);

	mmc_blk_mq_post_req(mq, req, true);
}
static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
					 struct request **prev_req)
{
	if (mmc_host_done_complete(mq->card->host))
		return;

	mutex_lock(&mq->complete_lock);

	if (!mq->complete_req)
		goto out_unlock;

	mmc_blk_mq_poll_completion(mq, mq->complete_req);

	if (prev_req)
		*prev_req = mq->complete_req;
	else
		mmc_blk_mq_post_req(mq, mq->complete_req, true);

	mq->complete_req = NULL;

out_unlock:
	mutex_unlock(&mq->complete_lock);
}

void mmc_blk_mq_complete_work(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    complete_work);

	mmc_blk_mq_complete_prev_req(mq, NULL);
}

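/*
 * mmc_request ->done() callback for the non-CQE path: complete the request
 * here if the host allows it, otherwise record it for the waiting issuer or
 * the completion work, or hand it to the recovery worker on error.
 */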
static void mmc_blk_mq_req_done(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_host *host = mq->card->host;
	unsigned long flags;

	if (!mmc_host_done_complete(host)) {
		bool waiting;

		/*
		 * We cannot complete the request in this context, so record
		 * that there is a request to complete, and that a following
		 * request does not need to wait (although it does need to
		 * complete complete_req first).
		 */
		spin_lock_irqsave(&mq->lock, flags);
		mq->complete_req = req;
		mq->rw_wait = false;
		waiting = mq->waiting;
		spin_unlock_irqrestore(&mq->lock, flags);

		/*
		 * If 'waiting' then the waiting task will complete this
		 * request, otherwise queue a work to do it. Note that
		 * complete_work may still race with the dispatch of a
		 * following request.
		 */
		if (waiting)
			wake_up(&mq->wait);
		else
			queue_work(mq->card->complete_wq, &mq->complete_work);

		return;
	}

	/* Take the recovery path for errors or urgent background operations */
	if (mmc_blk_rq_error(&mqrq->brq) ||
	    mmc_blk_urgent_bkops_needed(mq, mqrq)) {
		spin_lock_irqsave(&mq->lock, flags);
		mq->recovery_needed = true;
		mq->recovery_req = req;
		spin_unlock_irqrestore(&mq->lock, flags);

		schedule_work(&mq->recovery_work);

		return;
	}

	mmc_blk_rw_reset_success(mq, req);

	mq->rw_wait = false;
	wake_up(&mq->wait);

	/* context unknown */
	mmc_blk_mq_post_req(mq, req, false);
}

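/*
 * The non-CQE path dispatches one read/write request to the host at a time.
 * The wait condition below also returns -EBUSY when recovery is pending so
 * that the issuer backs off instead of starting a new request.
 */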
static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
{
	unsigned long flags;
	bool done;

	/*
	 * Wait while there is another request in progress, but not if recovery
	 * is needed. Also indicate whether there is a request waiting to start.
	 */
	spin_lock_irqsave(&mq->lock, flags);
	if (mq->recovery_needed) {
		*err = -EBUSY;
		done = true;
	} else {
		done = !mq->rw_wait;
	}
	mq->waiting = !done;
	spin_unlock_irqrestore(&mq->lock, flags);

	return done;
}

static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req)
{
	int err = 0;

	wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err));

	/* Always complete the previous request if there is one */
	mmc_blk_mq_complete_prev_req(mq, prev_req);

	return err;
}

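/*
 * Issue an asynchronous read/write request on the non-CQE path: prepare the
 * request, wait for the previous one to finish (completing it in the
 * process), then start the new one on the host.
 */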
static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
				  struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_host *host = mq->card->host;
	struct request *prev_req = NULL;
	int err = 0;

	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);

	mqrq->brq.mrq.done = mmc_blk_mq_req_done;

	mmc_pre_req(host, &mqrq->brq.mrq);

	err = mmc_blk_rw_wait(mq, &prev_req);
	if (err)
		goto out_post_req;

	mq->rw_wait = true;

	err = mmc_start_request(host, &mqrq->brq.mrq);

	if (prev_req)
		mmc_blk_mq_post_req(mq, prev_req, true);

	if (err)
		mq->rw_wait = false;

	/* Release re-tuning here where there is no synchronization required */
	if (err || mmc_host_done_complete(host))
		mmc_retune_release(host);

out_post_req:
	if (err)
		mmc_post_req(host, &mqrq->brq.mrq, err);

	return err;
}

static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
{
	if (host->cqe_enabled)
		return host->cqe_ops->cqe_wait_for_idle(host);

	return mmc_blk_rw_wait(mq, NULL);
}

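/*
 * Main issue entry point, called from the block layer's queue_rq path: switch
 * to the right card partition, then handle the request either synchronously
 * (driver ops, discard, erase, flush without CQE) or asynchronously
 * (read/write and CQE DCMD flush).
 */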
enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	struct mmc_host *host = card->host;
	int ret;

	ret = mmc_blk_part_switch(card, md->part_type);
	if (ret)
		return MMC_REQ_FAILED_TO_START;

	switch (mmc_issue_type(mq, req)) {
	case MMC_ISSUE_SYNC:
		ret = mmc_blk_wait_for_idle(mq, host);
		if (ret)
			return MMC_REQ_BUSY;
		switch (req_op(req)) {
		case REQ_OP_DRV_IN:
		case REQ_OP_DRV_OUT:
			mmc_blk_issue_drv_op(mq, req);
			break;
		case REQ_OP_DISCARD:
			mmc_blk_issue_discard_rq(mq, req);
			break;
		case REQ_OP_SECURE_ERASE:
			mmc_blk_issue_secdiscard_rq(mq, req);
			break;
		case REQ_OP_WRITE_ZEROES:
			mmc_blk_issue_trim_rq(mq, req);
			break;
		case REQ_OP_FLUSH:
			mmc_blk_issue_flush(mq, req);
			break;
		default:
			WARN_ON_ONCE(1);
			return MMC_REQ_FAILED_TO_START;
		}
		return MMC_REQ_FINISHED;
	case MMC_ISSUE_DCMD:
	case MMC_ISSUE_ASYNC:
		switch (req_op(req)) {
		case REQ_OP_FLUSH:
			if (!mmc_cache_enabled(host)) {
				blk_mq_end_request(req, BLK_STS_OK);
				return MMC_REQ_FINISHED;
			}
			ret = mmc_blk_cqe_issue_flush(mq, req);
			break;
		case REQ_OP_WRITE:
			card->written_flag = true;
			fallthrough;
		case REQ_OP_READ:
			if (host->cqe_enabled)
				ret = mmc_blk_cqe_issue_rw_rq(mq, req);
			else
				ret = mmc_blk_mq_issue_rw_rq(mq, req);
			break;
		default:
			WARN_ON_ONCE(1);
			ret = -EINVAL;
		}
		if (!ret)
			return MMC_REQ_STARTED;
		return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START;
	default:
		WARN_ON_ONCE(1);
		return MMC_REQ_FAILED_TO_START;
	}
}

static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}

/*
 * Search for a declared partitions node for the disk in the mmc-card related
 * node.
 *
 * This permits supporting a partition table defined in DT for the special
 * case where the partition table is not written to the disk and is expected
 * to be passed in from the running system.
 *
 * For the user disk, the "partitions" node is searched.
 * For the special HW disks, a "partitions-" node with the appended name is
 * used, following this conversion table (to adhere to JEDEC naming):
 * - boot0 -> partitions-boot1
 * - boot1 -> partitions-boot2
 * - gp0 -> partitions-gp1
 * - gp1 -> partitions-gp2
 * - gp2 -> partitions-gp3
 * - gp3 -> partitions-gp4
 */
static struct fwnode_handle *mmc_blk_get_partitions_node(struct device *mmc_dev,
							 const char *subname)
{
	const char *node_name = "partitions";

	if (subname) {
		mmc_dev = mmc_dev->parent;

		/*
		 * Check if we are allocating a BOOT disk boot0/1 disk.
		 * In DT we use the JEDEC naming boot1/2.
		 */
		if (!strcmp(subname, "boot0"))
			node_name = "partitions-boot1";
		if (!strcmp(subname, "boot1"))
			node_name = "partitions-boot2";
		/*
		 * Check if we are allocating a GP disk gp0/1/2/3 disk.
		 * In DT we use the JEDEC naming gp1/2/3/4.
		 */
		if (!strcmp(subname, "gp0"))
			node_name = "partitions-gp1";
		if (!strcmp(subname, "gp1"))
			node_name = "partitions-gp2";
		if (!strcmp(subname, "gp2"))
			node_name = "partitions-gp3";
		if (!strcmp(subname, "gp3"))
			node_name = "partitions-gp4";
	}

	return device_get_named_child_node(mmc_dev, node_name);
}

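/*
 * Illustrative device tree fragment for the lookup above (node names and
 * values are hypothetical, not taken from a specific board): a fixed
 * partition table for the boot0 area would live in a "partitions-boot1"
 * child node of the card node, e.g.:
 *
 *	mmc-card {
 *		partitions-boot1 {
 *			compatible = "fixed-partitions";
 *			#address-cells = <1>;
 *			#size-cells = <1>;
 *
 *			partition@0 {
 *				label = "bl2";
 *				reg = <0x0 0x100000>;
 *			};
 *		};
 *	};
 */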
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname,
					      int area_type,
					      unsigned int part_type)
{
	struct fwnode_handle *disk_fwnode;
	struct mmc_blk_data *md;
	int devidx, ret;
	char cap_str[10];
	unsigned int features = 0;

	devidx = ida_alloc_max(&mmc_blk_ida, max_devices - 1, GFP_KERNEL);
	if (devidx < 0) {
		/*
		 * We get -ENOSPC because there are no more device indexes
		 * available. This may happen either because userspace has not
		 * yet unmounted the partitions, which postpones
		 * mmc_blk_release() from being called, or because the device
		 * has more partitions than we support.
		 */
		if (devidx == -ENOSPC)
			dev_err(mmc_dev(card->host),
				"no more device IDs available\n");

		return ERR_PTR(devidx);
	}

	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	md->area_type = area_type;

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	if (mmc_host_cmd23(card->host)) {
		if ((mmc_card_mmc(card) &&
		     card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
		    (mmc_card_sd(card) && !mmc_card_ult_capacity(card) &&
		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
			md->flags |= MMC_BLK_CMD23;
	}

	if (md->flags & MMC_BLK_CMD23 &&
	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
	     card->ext_csd.rel_sectors)) {
		md->flags |= MMC_BLK_REL_WR;
		features |= (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
	} else if (mmc_cache_enabled(card->host)) {
		features |= BLK_FEAT_WRITE_CACHE;
	}

	md->disk = mmc_init_queue(&md->queue, card, features);
	if (IS_ERR(md->disk)) {
		ret = PTR_ERR(md->disk);
		goto err_kfree;
	}

	INIT_LIST_HEAD(&md->part);
	INIT_LIST_HEAD(&md->rpmbs);
	kref_init(&md->kref);

	md->queue.blkdata = md;
	md->part_type = part_type;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->minors = perdev_minors;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->parent = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);
	if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
		md->disk->flags |= GENHD_FL_NO_PART;

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%u%s", card->host->index, subname ? subname : "");

	set_capacity(md->disk, size);

	string_get_size((u64)size, 512, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s %s%s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? " (ro)" : "");

	/* used in ->open, must be set before add_disk: */
	if (area_type == MMC_BLK_DATA_AREA_MAIN)
		dev_set_drvdata(&card->dev, md);
	disk_fwnode = mmc_blk_get_partitions_node(parent, subname);
	ret = add_disk_fwnode(md->parent, md->disk, mmc_disk_attr_groups,
			      disk_fwnode);
	if (ret)
		goto err_put_disk;
	return md;

err_put_disk:
	put_disk(md->disk);
	blk_mq_free_tag_set(&md->queue.tag_set);
err_kfree:
	kfree(md);
out:
	ida_free(&mmc_blk_ida, devidx);
	return ERR_PTR(ret);
}

static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	sector_t size;

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		size = card->ext_csd.sectors;
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		size = (typeof(sector_t))card->csd.capacity
			<< (card->csd.read_blkbits - 9);
	}

	return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
				 MMC_BLK_DATA_AREA_MAIN, 0);
}

static int mmc_blk_alloc_part(struct mmc_card *card,
			      struct mmc_blk_data *md,
			      unsigned int part_type,
			      sector_t size,
			      bool default_ro,
			      const char *subname,
			      int area_type)
{
	struct mmc_blk_data *part_md;

	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
				    subname, area_type, part_type);
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	list_add(&part_md->part, &md->part);

	return 0;
}

/**
 * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev
 * @filp: the character device file
 * @cmd: the ioctl() command
 * @arg: the argument from userspace
 *
 * This will essentially just redirect the ioctl()s coming in over to
 * the main block device spawning the RPMB character device.
 */
static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
			   unsigned long arg)
{
	struct mmc_rpmb_data *rpmb = filp->private_data;
	int ret;

	switch (cmd) {
	case MMC_IOC_CMD:
		ret = mmc_blk_ioctl_cmd(rpmb->md,
					(struct mmc_ioc_cmd __user *)arg,
					rpmb);
		break;
	case MMC_IOC_MULTI_CMD:
		ret = mmc_blk_ioctl_multi_cmd(rpmb->md,
					(struct mmc_ioc_multi_cmd __user *)arg,
					rpmb);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
{
	struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
						  struct mmc_rpmb_data, chrdev);

	get_device(&rpmb->dev);
	filp->private_data = rpmb;

	return nonseekable_open(inode, filp);
}

static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
{
	struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
						  struct mmc_rpmb_data, chrdev);

	put_device(&rpmb->dev);

	return 0;
}

static const struct file_operations mmc_rpmb_fileops = {
	.release = mmc_rpmb_chrdev_release,
	.open = mmc_rpmb_chrdev_open,
	.owner = THIS_MODULE,
	.unlocked_ioctl = mmc_rpmb_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mmc_rpmb_ioctl_compat,
#endif
};

static void mmc_blk_rpmb_device_release(struct device *dev)
{
	struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);

	rpmb_dev_unregister(rpmb->rdev);
	mmc_blk_put(rpmb->md);
	ida_free(&mmc_rpmb_ida, rpmb->id);
	kfree(rpmb);
}

static void free_idata(struct mmc_blk_ioc_data **idata, unsigned int cmd_count)
{
	unsigned int n;

	for (n = 0; n < cmd_count; n++)
		kfree(idata[n]);
	kfree(idata);
}

static struct mmc_blk_ioc_data **alloc_idata(struct mmc_rpmb_data *rpmb,
					     unsigned int cmd_count)
{
	struct mmc_blk_ioc_data **idata;
	unsigned int n;

	idata = kcalloc(cmd_count, sizeof(*idata), GFP_KERNEL);
	if (!idata)
		return NULL;

	for (n = 0; n < cmd_count; n++) {
		idata[n] = kcalloc(1, sizeof(**idata), GFP_KERNEL);
		if (!idata[n]) {
			free_idata(idata, n);
			return NULL;
		}
		idata[n]->rpmb = rpmb;
	}

	return idata;
}

static void set_idata(struct mmc_blk_ioc_data *idata, u32 opcode,
		      int write_flag, u8 *buf, unsigned int buf_bytes)
{
	/*
	 * The size of an RPMB frame must match what's expected by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct rpmb_frame) != 512);

	idata->ic.opcode = opcode;
	idata->ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	idata->ic.write_flag = write_flag;
	idata->ic.blksz = sizeof(struct rpmb_frame);
	idata->ic.blocks = buf_bytes / idata->ic.blksz;
	idata->buf = buf;
	idata->buf_bytes = buf_bytes;
}

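/*
 * Route raw RPMB frames from the RPMB subsystem to the card. Write requests
 * (program key, write data) become three commands: a reliable write of the
 * request frame(s), a write of a RESULT_READ request and a read of the
 * result frame. Read requests (get write counter, read data) only need the
 * request write and the response read.
 */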
static int mmc_route_rpmb_frames(struct device *dev, u8 *req,
				 unsigned int req_len, u8 *resp,
				 unsigned int resp_len)
{
	struct rpmb_frame *frm = (struct rpmb_frame *)req;
	struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
	struct mmc_blk_data *md = rpmb->md;
	struct mmc_blk_ioc_data **idata;
	struct mmc_queue_req *mq_rq;
	unsigned int cmd_count;
	struct request *rq;
	u16 req_type;
	bool write;
	int ret;

	if (IS_ERR(md->queue.card))
		return PTR_ERR(md->queue.card);

	if (req_len < sizeof(*frm))
		return -EINVAL;

	req_type = be16_to_cpu(frm->req_resp);
	switch (req_type) {
	case RPMB_PROGRAM_KEY:
		if (req_len != sizeof(struct rpmb_frame) ||
		    resp_len != sizeof(struct rpmb_frame))
			return -EINVAL;
		write = true;
		break;
	case RPMB_GET_WRITE_COUNTER:
		if (req_len != sizeof(struct rpmb_frame) ||
		    resp_len != sizeof(struct rpmb_frame))
			return -EINVAL;
		write = false;
		break;
	case RPMB_WRITE_DATA:
		if (req_len % sizeof(struct rpmb_frame) ||
		    resp_len != sizeof(struct rpmb_frame))
			return -EINVAL;
		write = true;
		break;
	case RPMB_READ_DATA:
		if (req_len != sizeof(struct rpmb_frame) ||
		    resp_len % sizeof(struct rpmb_frame))
			return -EINVAL;
		write = false;
		break;
	default:
		return -EINVAL;
	}

	if (write)
		cmd_count = 3;
	else
		cmd_count = 2;

	idata = alloc_idata(rpmb, cmd_count);
	if (!idata)
		return -ENOMEM;

	if (write) {
		struct rpmb_frame *frm = (struct rpmb_frame *)resp;

		/* Send write request frame(s) */
		set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK,
			  1 | MMC_CMD23_ARG_REL_WR, req, req_len);

		/* Send result request frame */
		memset(frm, 0, sizeof(*frm));
		frm->req_resp = cpu_to_be16(RPMB_RESULT_READ);
		set_idata(idata[1], MMC_WRITE_MULTIPLE_BLOCK, 1, resp,
			  resp_len);

		/* Read response frame */
		set_idata(idata[2], MMC_READ_MULTIPLE_BLOCK, 0, resp, resp_len);
	} else {
		/* Send write request frame(s) */
		set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK, 1, req, req_len);

		/* Read response frame */
		set_idata(idata[1], MMC_READ_MULTIPLE_BLOCK, 0, resp, resp_len);
	}

	rq = blk_mq_alloc_request(md->queue.queue, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto out;
	}

	mq_rq = req_to_mmc_queue_req(rq);
	mq_rq->drv_op = MMC_DRV_OP_IOCTL_RPMB;
	mq_rq->drv_op_result = -EIO;
	mq_rq->drv_op_data = idata;
	mq_rq->ioc_count = cmd_count;
	blk_execute_rq(rq, false);
	ret = req_to_mmc_queue_req(rq)->drv_op_result;

	blk_mq_free_request(rq);

out:
	free_idata(idata, cmd_count);
	return ret;
}

static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
				   struct mmc_blk_data *md,
				   unsigned int part_index,
				   sector_t size,
				   const char *subname)
{
	int devidx, ret;
	char rpmb_name[DISK_NAME_LEN];
	char cap_str[10];
	struct mmc_rpmb_data *rpmb;

	/* This creates the minor number for the RPMB char device */
	devidx = ida_alloc_max(&mmc_rpmb_ida, max_devices - 1, GFP_KERNEL);
	if (devidx < 0)
		return devidx;

	rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
	if (!rpmb) {
		ida_free(&mmc_rpmb_ida, devidx);
		return -ENOMEM;
	}

	snprintf(rpmb_name, sizeof(rpmb_name),
		 "mmcblk%u%s", card->host->index, subname ? subname : "");

	rpmb->id = devidx;
	rpmb->part_index = part_index;
	rpmb->dev.init_name = rpmb_name;
	rpmb->dev.bus = &mmc_rpmb_bus_type;
	rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id);
	rpmb->dev.parent = &card->dev;
	rpmb->dev.release = mmc_blk_rpmb_device_release;
	device_initialize(&rpmb->dev);
	dev_set_drvdata(&rpmb->dev, rpmb);
	mmc_blk_get(md->disk);
	rpmb->md = md;

	cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops);
	rpmb->chrdev.owner = THIS_MODULE;
	ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev);
	if (ret) {
		pr_err("%s: could not add character device\n", rpmb_name);
		goto out_put_device;
	}

	list_add(&rpmb->node, &md->rpmbs);

	string_get_size((u64)size, 512, STRING_UNITS_2,
			cap_str, sizeof(cap_str));

	pr_info("%s: %s %s %s, chardev (%d:%d)\n",
		rpmb_name, mmc_card_id(card), mmc_card_name(card), cap_str,
		MAJOR(mmc_rpmb_devt), rpmb->id);

	return 0;

out_put_device:
	put_device(&rpmb->dev);
	return ret;
}

static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb)
{
	cdev_device_del(&rpmb->chrdev, &rpmb->dev);
	put_device(&rpmb->dev);
}

/* MMC Physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */
static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int idx, ret;

	if (!mmc_card_mmc(card))
		return 0;

	for (idx = 0; idx < card->nr_parts; idx++) {
		if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) {
			/*
			 * RPMB partitions do not provide block access; they
			 * are only accessed using ioctl()s. Thus create
			 * special RPMB block devices that do not have a
			 * backing block queue for these.
			 */
			ret = mmc_blk_alloc_rpmb_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].name);
			if (ret)
				return ret;
		} else if (card->part[idx].size) {
			ret = mmc_blk_alloc_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].force_ro,
				card->part[idx].name,
				card->part[idx].area_type);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
	/*
	 * Flush remaining requests and free queues. It is freeing the queue
	 * that stops new requests from being accepted.
	 */
	del_gendisk(md->disk);
	mmc_cleanup_queue(&md->queue);
	mmc_blk_put(md);
}

static void mmc_blk_remove_parts(struct mmc_card *card,
				 struct mmc_blk_data *md)
{
	struct list_head *pos, *q;
	struct mmc_blk_data *part_md;
	struct mmc_rpmb_data *rpmb;

	/* Remove RPMB partitions */
	list_for_each_safe(pos, q, &md->rpmbs) {
		rpmb = list_entry(pos, struct mmc_rpmb_data, node);
		list_del(pos);
		mmc_blk_remove_rpmb_part(rpmb);
	}
	/* Remove block partitions */
	list_for_each_safe(pos, q, &md->part) {
		part_md = list_entry(pos, struct mmc_blk_data, part);
		list_del(pos);
		mmc_blk_remove_req(part_md);
	}
}

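/*
 * debugfs support: per-card "status" and (for eMMC) "ext_csd" entries. Both
 * are implemented as driver ops issued through the block queue so that they
 * serialize with regular I/O.
 */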
#ifdef CONFIG_DEBUG_FS

static int mmc_dbg_card_status_get(void *data, u64 *val)
{
	struct mmc_card *card = data;
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
	struct mmc_queue *mq = &md->queue;
	struct request *req;
	int ret;

	/* Ask the block layer about the card status */
	req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	blk_execute_rq(req, false);
	ret = req_to_mmc_queue_req(req)->drv_op_result;
	if (ret >= 0) {
		*val = ret;
		ret = 0;
	}
	blk_mq_free_request(req);

	return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
			 NULL, "%08llx\n");

/* That is two digits * 512 + 1 for newline */
#define EXT_CSD_STR_LEN 1025

static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
{
	struct mmc_card *card = inode->i_private;
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
	struct mmc_queue *mq = &md->queue;
	struct request *req;
	char *buf;
	ssize_t n = 0;
	u8 *ext_csd;
	int err, i;

	buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Ask the block layer for the EXT CSD */
	req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_free;
	}
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
	blk_execute_rq(req, false);
	err = req_to_mmc_queue_req(req)->drv_op_result;
	blk_mq_free_request(req);
	if (err) {
		pr_err("FAILED %d\n", err);
		goto out_free;
	}

	for (i = 0; i < 512; i++)
		n += sprintf(buf + n, "%02x", ext_csd[i]);
	n += sprintf(buf + n, "\n");

	if (n != EXT_CSD_STR_LEN) {
		err = -EINVAL;
		kfree(ext_csd);
		goto out_free;
	}

	filp->private_data = buf;
	kfree(ext_csd);
	return 0;

out_free:
	kfree(buf);
	return err;
}

static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	char *buf = filp->private_data;

	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, EXT_CSD_STR_LEN);
}

static int mmc_ext_csd_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations mmc_dbg_ext_csd_fops = {
	.open		= mmc_ext_csd_open,
	.read		= mmc_ext_csd_read,
	.release	= mmc_ext_csd_release,
	.llseek		= default_llseek,
};

static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
{
	struct dentry *root;

	if (!card->debugfs_root)
		return;

	root = card->debugfs_root;

	if (mmc_card_mmc(card) || mmc_card_sd(card)) {
		md->status_dentry =
			debugfs_create_file_unsafe("status", 0400, root,
						   card,
						   &mmc_dbg_card_status_fops);
	}

	if (mmc_card_mmc(card)) {
		md->ext_csd_dentry =
			debugfs_create_file("ext_csd", S_IRUSR, root, card,
					    &mmc_dbg_ext_csd_fops);
	}
}

static void mmc_blk_remove_debugfs(struct mmc_card *card,
				   struct mmc_blk_data *md)
{
	if (!card->debugfs_root)
		return;

	debugfs_remove(md->status_dentry);
	md->status_dentry = NULL;

	debugfs_remove(md->ext_csd_dentry);
	md->ext_csd_dentry = NULL;
}

#else

static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
{
}

static void mmc_blk_remove_debugfs(struct mmc_card *card,
				   struct mmc_blk_data *md)
{
}

#endif /* CONFIG_DEBUG_FS */

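/*
 * Register every RPMB partition of this card with the RPMB subsystem so that
 * in-kernel users can route frames to it without going through the character
 * device.
 */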
static void mmc_blk_rpmb_add(struct mmc_card *card)
{
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
	struct mmc_rpmb_data *rpmb;
	struct rpmb_dev *rdev;
	unsigned int n;
	u32 cid[4];
	struct rpmb_descr descr = {
		.type = RPMB_TYPE_EMMC,
		.route_frames = mmc_route_rpmb_frames,
		.reliable_wr_count = card->ext_csd.enhanced_rpmb_supported ?
				     2 : 32,
		.capacity = card->ext_csd.raw_rpmb_size_mult,
		.dev_id = (void *)cid,
		.dev_id_len = sizeof(cid),
	};

	/*
	 * Provide the CID as an octet array. The CID needs to be interpreted
	 * when used as input to derive the RPMB key since some fields
	 * will change due to firmware updates.
	 */
	for (n = 0; n < 4; n++)
		cid[n] = be32_to_cpu((__force __be32)card->raw_cid[n]);

	list_for_each_entry(rpmb, &md->rpmbs, node) {
		rdev = rpmb_dev_register(&rpmb->dev, &descr);
		if (IS_ERR(rdev)) {
			pr_warn("%s: could not register RPMB device\n",
				dev_name(&rpmb->dev));
			continue;
		}
		rpmb->rdev = rdev;
	}
}

static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md;
	int ret = 0;

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	mmc_fixup_device(card, mmc_blk_fixups);

	card->complete_wq = alloc_workqueue("mmc_complete",
					WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!card->complete_wq) {
		pr_err("Failed to create mmc completion workqueue");
		return -ENOMEM;
	}

	md = mmc_blk_alloc(card);
	if (IS_ERR(md)) {
		ret = PTR_ERR(md);
		goto out_free;
	}

	ret = mmc_blk_alloc_parts(card, md);
	if (ret)
		goto out;

	/* Add two debugfs entries */
	mmc_blk_add_debugfs(card, md);

	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
	pm_runtime_use_autosuspend(&card->dev);

	/*
	 * Don't enable runtime PM for SD-combo cards here. Leave that
	 * decision to be taken during the SDIO init sequence instead.
	 */
	if (!mmc_card_sd_combo(card)) {
		pm_runtime_set_active(&card->dev);
		pm_runtime_enable(&card->dev);
	}

	mmc_blk_rpmb_add(card);

	return 0;

out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
out_free:
	destroy_workqueue(card->complete_wq);
	return ret;
}

static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);

	mmc_blk_remove_debugfs(card, md);
	mmc_blk_remove_parts(card, md);
	pm_runtime_get_sync(&card->dev);
	if (md->part_curr != md->part_type) {
		mmc_claim_host(card->host);
		mmc_blk_part_switch(card, md->part_type);
		mmc_release_host(card->host);
	}
	if (!mmc_card_sd_combo(card))
		pm_runtime_disable(&card->dev);
	pm_runtime_put_noidle(&card->dev);
	mmc_blk_remove_req(md);
	destroy_workqueue(card->complete_wq);
}

static int _mmc_blk_suspend(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);

	if (md) {
		mmc_queue_suspend(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_suspend(&part_md->queue);
		}
	}
	return 0;
}

static void mmc_blk_shutdown(struct mmc_card *card)
{
	_mmc_blk_suspend(card);
}

#ifdef CONFIG_PM_SLEEP
static int mmc_blk_suspend(struct device *dev)
{
	struct mmc_card *card = mmc_dev_to_card(dev);

	return _mmc_blk_suspend(card);
}

static int mmc_blk_resume(struct device *dev)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = dev_get_drvdata(dev);

	if (md) {
		/*
		 * Resume involves the card going into idle state,
		 * so current partition is always the main one.
		 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
	}
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
		.pm	= &mmc_blk_pm_ops,
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.shutdown	= mmc_blk_shutdown,
};

static int __init mmc_blk_init(void)
{
	int res;

	res = bus_register(&mmc_rpmb_bus_type);
	if (res < 0) {
		pr_err("mmcblk: could not register RPMB bus type\n");
		return res;
	}
	res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb");
	if (res < 0) {
		pr_err("mmcblk: failed to allocate rpmb chrdev region\n");
		goto out_bus_unreg;
	}

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);

	max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out_chrdev_unreg;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out_blkdev_unreg;

	return 0;

out_blkdev_unreg:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
out_chrdev_unreg:
	unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
out_bus_unreg:
	bus_unregister(&mmc_rpmb_bus_type);
	return res;
}

static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
	unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
	bus_unregister(&mmc_rpmb_bus_type);
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");