drivers/mmc/card/block.c
1 /*
2 * Block driver for media (i.e., flash cards)
4 * Copyright 2002 Hewlett-Packard Company
5 * Copyright 2005-2008 Pierre Ossman
7 * Use consistent with the GNU GPL is permitted,
8 * provided that this copyright notice is
9 * preserved in its entirety in all copies and derived works.
11 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
12 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
13 * FITNESS FOR ANY PARTICULAR PURPOSE.
15 * Many thanks to Alessandro Rubini and Jonathan Corbet!
17 * Author: Andrew Christian
18 * 28 May 2002
20 #include <linux/moduleparam.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
24 #include <linux/kernel.h>
25 #include <linux/fs.h>
26 #include <linux/slab.h>
27 #include <linux/errno.h>
28 #include <linux/hdreg.h>
29 #include <linux/kdev_t.h>
30 #include <linux/blkdev.h>
31 #include <linux/mutex.h>
32 #include <linux/scatterlist.h>
33 #include <linux/string_helpers.h>
34 #include <linux/delay.h>
35 #include <linux/capability.h>
36 #include <linux/compat.h>
38 #include <linux/mmc/ioctl.h>
39 #include <linux/mmc/card.h>
40 #include <linux/mmc/host.h>
41 #include <linux/mmc/mmc.h>
42 #include <linux/mmc/sd.h>
44 #include <asm/system.h>
45 #include <asm/uaccess.h>
47 #include "queue.h"
49 MODULE_ALIAS("mmc:block");
50 #ifdef MODULE_PARAM_PREFIX
51 #undef MODULE_PARAM_PREFIX
52 #endif
53 #define MODULE_PARAM_PREFIX "mmcblk."
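/*
 * iNAND-specific EXT_CSD byte (113) and the values written to it to
 * select which variant of erase/trim a following CMD38 performs
 * (used for cards flagged with MMC_QUIRK_INAND_CMD38 below).
 */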
55 #define INAND_CMD38_ARG_EXT_CSD 113
56 #define INAND_CMD38_ARG_ERASE 0x00
57 #define INAND_CMD38_ARG_TRIM 0x01
58 #define INAND_CMD38_ARG_SECERASE 0x80
59 #define INAND_CMD38_ARG_SECTRIM1 0x81
60 #define INAND_CMD38_ARG_SECTRIM2 0x88
62 static DEFINE_MUTEX(block_mutex);
65 * The defaults come from config options but can be overridden by module
66 * or bootarg options.
68 static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
71 * We've only got one major, so the number of mmcblk devices is
72 * limited to 256 / number of minors per device.
74 static int max_devices;
76 /* 256 minors, so at most 256 separate devices */
77 static DECLARE_BITMAP(dev_use, 256);
78 static DECLARE_BITMAP(name_use, 256);
81 * There is one mmc_blk_data per slot.
83 struct mmc_blk_data {
84 spinlock_t lock;
85 struct gendisk *disk;
86 struct mmc_queue queue;
87 struct list_head part;
89 unsigned int flags;
90 #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
91 #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
93 unsigned int usage;
94 unsigned int read_only;
95 unsigned int part_type;
96 unsigned int name_idx;
97 unsigned int reset_done;
98 #define MMC_BLK_READ BIT(0)
99 #define MMC_BLK_WRITE BIT(1)
100 #define MMC_BLK_DISCARD BIT(2)
101 #define MMC_BLK_SECDISCARD BIT(3)
104 * Only set in the main mmc_blk_data associated with the
105 * mmc_card via mmc_set_drvdata, and keeps track of the
106 * currently selected device partition.
108 unsigned int part_curr;
109 struct device_attribute force_ro;
112 static DEFINE_MUTEX(open_lock);
114 enum mmc_blk_status {
115 MMC_BLK_SUCCESS = 0,
116 MMC_BLK_PARTIAL,
117 MMC_BLK_CMD_ERR,
118 MMC_BLK_RETRY,
119 MMC_BLK_ABORT,
120 MMC_BLK_DATA_ERR,
121 MMC_BLK_ECC_ERR,
124 module_param(perdev_minors, int, 0444);
125 MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
127 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
129 struct mmc_blk_data *md;
131 mutex_lock(&open_lock);
132 md = disk->private_data;
133 if (md && md->usage == 0)
134 md = NULL;
135 if (md)
136 md->usage++;
137 mutex_unlock(&open_lock);
139 return md;
142 static inline int mmc_get_devidx(struct gendisk *disk)
144 int devmaj = MAJOR(disk_devt(disk));
145 int devidx = MINOR(disk_devt(disk)) / perdev_minors;
147 if (!devmaj)
148 devidx = disk->first_minor / perdev_minors;
149 return devidx;
152 static void mmc_blk_put(struct mmc_blk_data *md)
154 mutex_lock(&open_lock);
155 md->usage--;
156 if (md->usage == 0) {
157 int devidx = mmc_get_devidx(md->disk);
158 blk_cleanup_queue(md->queue.queue);
160 __clear_bit(devidx, dev_use);
162 put_disk(md->disk);
163 kfree(md);
165 mutex_unlock(&open_lock);
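/*
 * "force_ro" sysfs attribute: reading reports whether the disk has been
 * forced read-only beyond the card's own write-protect state; writing a
 * non-zero value forces the block device read-only.
 */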
168 static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
169 char *buf)
171 int ret;
172 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
174 ret = snprintf(buf, PAGE_SIZE, "%d",
175 get_disk_ro(dev_to_disk(dev)) ^
176 md->read_only);
177 mmc_blk_put(md);
178 return ret;
181 static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
182 const char *buf, size_t count)
184 int ret;
185 char *end;
186 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
187 unsigned long set = simple_strtoul(buf, &end, 0);
188 if (end == buf) {
189 ret = -EINVAL;
190 goto out;
193 set_disk_ro(dev_to_disk(dev), set || md->read_only);
194 ret = count;
195 out:
196 mmc_blk_put(md);
197 return ret;
200 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
202 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
203 int ret = -ENXIO;
205 mutex_lock(&block_mutex);
206 if (md) {
207 if (md->usage == 2)
208 check_disk_change(bdev);
209 ret = 0;
211 if ((mode & FMODE_WRITE) && md->read_only) {
212 mmc_blk_put(md);
213 ret = -EROFS;
216 mutex_unlock(&block_mutex);
218 return ret;
221 static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
223 struct mmc_blk_data *md = disk->private_data;
225 mutex_lock(&block_mutex);
226 mmc_blk_put(md);
227 mutex_unlock(&block_mutex);
228 return 0;
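/*
 * MMC/SD cards have no real CHS geometry; report a fixed 4-head,
 * 16-sector layout derived from the capacity for HDIO_GETGEO callers.
 */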
231 static int
232 mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
234 geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
235 geo->heads = 4;
236 geo->sectors = 16;
237 return 0;
240 struct mmc_blk_ioc_data {
241 struct mmc_ioc_cmd ic;
242 unsigned char *buf;
243 u64 buf_bytes;
246 static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
247 struct mmc_ioc_cmd __user *user)
249 struct mmc_blk_ioc_data *idata;
250 int err;
252 idata = kzalloc(sizeof(*idata), GFP_KERNEL);
253 if (!idata) {
254 err = -ENOMEM;
255 goto out;
258 if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
259 err = -EFAULT;
260 goto idata_err;
263 idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
264 if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
265 err = -EOVERFLOW;
266 goto idata_err;
269 if (!idata->buf_bytes)
270 return idata;
272 idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
273 if (!idata->buf) {
274 err = -ENOMEM;
275 goto idata_err;
278 if (copy_from_user(idata->buf, (void __user *)(unsigned long)
279 idata->ic.data_ptr, idata->buf_bytes)) {
280 err = -EFAULT;
281 goto copy_err;
284 return idata;
286 copy_err:
287 kfree(idata->buf);
288 idata_err:
289 kfree(idata);
290 out:
291 return ERR_PTR(err);
294 static int mmc_blk_ioctl_cmd(struct block_device *bdev,
295 struct mmc_ioc_cmd __user *ic_ptr)
297 struct mmc_blk_ioc_data *idata;
298 struct mmc_blk_data *md;
299 struct mmc_card *card;
300 struct mmc_command cmd = {0};
301 struct mmc_data data = {0};
302 struct mmc_request mrq = {NULL};
303 struct scatterlist sg;
304 int err;
307 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
308 * whole block device, not on a partition. This prevents overspray
309 * between sibling partitions.
311 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
312 return -EPERM;
314 idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
315 if (IS_ERR(idata))
316 return PTR_ERR(idata);
318 md = mmc_blk_get(bdev->bd_disk);
319 if (!md) {
320 err = -EINVAL;
321 goto cmd_done;
324 card = md->queue.card;
325 if (IS_ERR(card)) {
326 err = PTR_ERR(card);
327 goto cmd_done;
330 cmd.opcode = idata->ic.opcode;
331 cmd.arg = idata->ic.arg;
332 cmd.flags = idata->ic.flags;
334 if (idata->buf_bytes) {
335 data.sg = &sg;
336 data.sg_len = 1;
337 data.blksz = idata->ic.blksz;
338 data.blocks = idata->ic.blocks;
340 sg_init_one(data.sg, idata->buf, idata->buf_bytes);
342 if (idata->ic.write_flag)
343 data.flags = MMC_DATA_WRITE;
344 else
345 data.flags = MMC_DATA_READ;
347 /* data.flags must already be set before doing this. */
348 mmc_set_data_timeout(&data, card);
350 /* Allow overriding the timeout_ns for empirical tuning. */
351 if (idata->ic.data_timeout_ns)
352 data.timeout_ns = idata->ic.data_timeout_ns;
354 if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
356 * Pretend this is a data transfer and rely on the
357 * host driver to compute timeout. When all host
358 * drivers support cmd.cmd_timeout for R1B, this
359 * can be changed to:
361 * mrq.data = NULL;
362 * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
364 data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
367 mrq.data = &data;
370 mrq.cmd = &cmd;
372 mmc_claim_host(card->host);
374 if (idata->ic.is_acmd) {
375 err = mmc_app_cmd(card->host, card);
376 if (err)
377 goto cmd_rel_host;
380 mmc_wait_for_req(card->host, &mrq);
382 if (cmd.error) {
383 dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
384 __func__, cmd.error);
385 err = cmd.error;
386 goto cmd_rel_host;
388 if (data.error) {
389 dev_err(mmc_dev(card->host), "%s: data error %d\n",
390 __func__, data.error);
391 err = data.error;
392 goto cmd_rel_host;
396 * According to the SD specs, some commands require a delay after
397 * issuing the command.
399 if (idata->ic.postsleep_min_us)
400 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
402 if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
403 err = -EFAULT;
404 goto cmd_rel_host;
407 if (!idata->ic.write_flag) {
408 if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
409 idata->buf, idata->buf_bytes)) {
410 err = -EFAULT;
411 goto cmd_rel_host;
415 cmd_rel_host:
416 mmc_release_host(card->host);
418 cmd_done:
419 mmc_blk_put(md);
420 kfree(idata->buf);
421 kfree(idata);
422 return err;
425 static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
426 unsigned int cmd, unsigned long arg)
428 int ret = -EINVAL;
429 if (cmd == MMC_IOC_CMD)
430 ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
431 return ret;
434 #ifdef CONFIG_COMPAT
435 static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
436 unsigned int cmd, unsigned long arg)
438 return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
440 #endif
442 static const struct block_device_operations mmc_bdops = {
443 .open = mmc_blk_open,
444 .release = mmc_blk_release,
445 .getgeo = mmc_blk_getgeo,
446 .owner = THIS_MODULE,
447 .ioctl = mmc_blk_ioctl,
448 #ifdef CONFIG_COMPAT
449 .compat_ioctl = mmc_blk_compat_ioctl,
450 #endif
453 static inline int mmc_blk_part_switch(struct mmc_card *card,
454 struct mmc_blk_data *md)
456 int ret;
457 struct mmc_blk_data *main_md = mmc_get_drvdata(card);
459 if (main_md->part_curr == md->part_type)
460 return 0;
462 if (mmc_card_mmc(card)) {
463 u8 part_config = card->ext_csd.part_config;
465 part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
466 part_config |= md->part_type;
468 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
469 EXT_CSD_PART_CONFIG, part_config,
470 card->ext_csd.part_time);
471 if (ret)
472 return ret;
474 card->ext_csd.part_config = part_config;
477 main_md->part_curr = md->part_type;
478 return 0;
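/*
 * Ask an SD card how many blocks of the last write completed
 * successfully (ACMD22), so a failed multi-block write can still be
 * partially acknowledged to the block layer.
 */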
481 static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
483 int err;
484 u32 result;
485 __be32 *blocks;
487 struct mmc_request mrq = {NULL};
488 struct mmc_command cmd = {0};
489 struct mmc_data data = {0};
490 unsigned int timeout_us;
492 struct scatterlist sg;
494 cmd.opcode = MMC_APP_CMD;
495 cmd.arg = card->rca << 16;
496 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
498 err = mmc_wait_for_cmd(card->host, &cmd, 0);
499 if (err)
500 return (u32)-1;
501 if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
502 return (u32)-1;
504 memset(&cmd, 0, sizeof(struct mmc_command));
506 cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
507 cmd.arg = 0;
508 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
510 data.timeout_ns = card->csd.tacc_ns * 100;
511 data.timeout_clks = card->csd.tacc_clks * 100;
513 timeout_us = data.timeout_ns / 1000;
514 timeout_us += data.timeout_clks * 1000 /
515 (card->host->ios.clock / 1000);
517 if (timeout_us > 100000) {
518 data.timeout_ns = 100000000;
519 data.timeout_clks = 0;
522 data.blksz = 4;
523 data.blocks = 1;
524 data.flags = MMC_DATA_READ;
525 data.sg = &sg;
526 data.sg_len = 1;
528 mrq.cmd = &cmd;
529 mrq.data = &data;
531 blocks = kmalloc(4, GFP_KERNEL);
532 if (!blocks)
533 return (u32)-1;
535 sg_init_one(&sg, blocks, 4);
537 mmc_wait_for_req(card->host, &mrq);
539 result = ntohl(*blocks);
540 kfree(blocks);
542 if (cmd.error || data.error)
543 result = (u32)-1;
545 return result;
548 static int send_stop(struct mmc_card *card, u32 *status)
550 struct mmc_command cmd = {0};
551 int err;
553 cmd.opcode = MMC_STOP_TRANSMISSION;
554 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
555 err = mmc_wait_for_cmd(card->host, &cmd, 5);
556 if (err == 0)
557 *status = cmd.resp[0];
558 return err;
561 static int get_card_status(struct mmc_card *card, u32 *status, int retries)
563 struct mmc_command cmd = {0};
564 int err;
566 cmd.opcode = MMC_SEND_STATUS;
567 if (!mmc_host_is_spi(card->host))
568 cmd.arg = card->rca << 16;
569 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
570 err = mmc_wait_for_cmd(card->host, &cmd, retries);
571 if (err == 0)
572 *status = cmd.resp[0];
573 return err;
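/*
 * Possible outcomes when analysing a failed r/w command: retry it,
 * abort the request, or continue with normal error handling.
 */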
576 #define ERR_RETRY 2
577 #define ERR_ABORT 1
578 #define ERR_CONTINUE 0
580 static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
581 bool status_valid, u32 status)
583 switch (error) {
584 case -EILSEQ:
585 /* response crc error, retry the r/w cmd */
586 pr_err("%s: %s sending %s command, card status %#x\n",
587 req->rq_disk->disk_name, "response CRC error",
588 name, status);
589 return ERR_RETRY;
591 case -ETIMEDOUT:
592 pr_err("%s: %s sending %s command, card status %#x\n",
593 req->rq_disk->disk_name, "timed out", name, status);
595 /* If the status cmd initially failed, retry the r/w cmd */
596 if (!status_valid)
597 return ERR_RETRY;
600 * If it was an r/w cmd CRC error, or an illegal command
601 * (e.g., issued in the wrong state) then retry - we should
602 * have corrected the state problem above.
604 if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
605 return ERR_RETRY;
607 /* Otherwise abort the command */
608 return ERR_ABORT;
610 default:
611 /* We don't understand the error code the driver gave us */
612 pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
613 req->rq_disk->disk_name, error, status);
614 return ERR_ABORT;
619 * Initial r/w and stop cmd error recovery.
620 * We don't know whether the card received the r/w cmd or not, so try to
621 * restore things back to a sane state. Essentially, we do this as follows:
622 * - Obtain card status. If the first attempt to obtain card status fails,
623 * the status word will reflect the failed status cmd, not the failed
624 * r/w cmd. If we fail to obtain card status, it suggests we can no
625 * longer communicate with the card.
626 * - Check the card state. If the card received the cmd but there was a
627 * transient problem with the response, it might still be in a data transfer
628 * mode. Try to send it a stop command. If this fails, we can't recover.
629 * - If the r/w cmd failed due to a response CRC error, it was probably
630 * transient, so retry the cmd.
631 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
632 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
633 * illegal cmd, retry.
634 * Otherwise we don't understand what happened, so abort.
636 static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
637 struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
639 bool prev_cmd_status_valid = true;
640 u32 status, stop_status = 0;
641 int err, retry;
644 * Try to get card status which indicates both the card state
645 * and why there was no response. If the first attempt fails,
646 * we can't be sure the returned status is for the r/w command.
648 for (retry = 2; retry >= 0; retry--) {
649 err = get_card_status(card, &status, 0);
650 if (!err)
651 break;
653 prev_cmd_status_valid = false;
654 pr_err("%s: error %d sending status command, %sing\n",
655 req->rq_disk->disk_name, err, retry ? "retry" : "abort");
658 /* We couldn't get a response from the card. Give up. */
659 if (err)
660 return ERR_ABORT;
662 /* Flag ECC errors */
663 if ((status & R1_CARD_ECC_FAILED) ||
664 (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
665 (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
666 *ecc_err = 1;
668 /* Flag General errors */
669 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
670 if ((status & R1_ERROR) ||
671 (brq->stop.resp[0] & R1_ERROR)) {
672 pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
673 req->rq_disk->disk_name, __func__,
674 brq->stop.resp[0], status);
675 *gen_err = 1;
679 * Check the current card state. If it is in some data transfer
680 * mode, tell it to stop (and hopefully transition back to TRAN.)
682 if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
683 R1_CURRENT_STATE(status) == R1_STATE_RCV) {
684 err = send_stop(card, &stop_status);
685 if (err)
686 pr_err("%s: error %d sending stop command\n",
687 req->rq_disk->disk_name, err);
690 * If the stop cmd also timed out, the card is probably
691 * not present, so abort. Other errors are bad news too.
693 if (err)
694 return ERR_ABORT;
695 if (stop_status & R1_CARD_ECC_FAILED)
696 *ecc_err = 1;
697 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
698 if (stop_status & R1_ERROR) {
699 pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
700 req->rq_disk->disk_name, __func__,
701 stop_status);
702 *gen_err = 1;
706 /* Check for set block count errors */
707 if (brq->sbc.error)
708 return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
709 prev_cmd_status_valid, status);
711 /* Check for r/w command errors */
712 if (brq->cmd.error)
713 return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
714 prev_cmd_status_valid, status);
716 /* Data errors */
717 if (!brq->stop.error)
718 return ERR_CONTINUE;
720 /* Now for stop errors. These aren't fatal to the transfer. */
721 pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
722 req->rq_disk->disk_name, brq->stop.error,
723 brq->cmd.resp[0], status);
726 * Substitute in our own stop status as this will give the error
727 * state which happened during the execution of the r/w command.
729 if (stop_status) {
730 brq->stop.resp[0] = stop_status;
731 brq->stop.error = 0;
733 return ERR_CONTINUE;
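/*
 * Perform at most one hardware reset per request type (read, write,
 * discard, secure discard); reset_done records which types have already
 * been tried and is cleared again by mmc_blk_reset_success().
 */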
736 static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
737 int type)
739 int err;
741 if (md->reset_done & type)
742 return -EEXIST;
744 md->reset_done |= type;
745 err = mmc_hw_reset(host);
746 /* Ensure we switch back to the correct partition */
747 if (err != -EOPNOTSUPP) {
748 struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
749 int part_err;
751 main_md->part_curr = main_md->part_type;
752 part_err = mmc_blk_part_switch(host->card, md);
753 if (part_err) {
755 * We have failed to get back into the correct
756 * partition, so we need to abort the whole request.
758 return -ENODEV;
761 return err;
764 static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
766 md->reset_done &= ~type;
769 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
771 struct mmc_blk_data *md = mq->data;
772 struct mmc_card *card = md->queue.card;
773 unsigned int from, nr, arg;
774 int err = 0, type = MMC_BLK_DISCARD;
776 if (!mmc_can_erase(card)) {
777 err = -EOPNOTSUPP;
778 goto out;
781 from = blk_rq_pos(req);
782 nr = blk_rq_sectors(req);
784 if (mmc_can_discard(card))
785 arg = MMC_DISCARD_ARG;
786 else if (mmc_can_trim(card))
787 arg = MMC_TRIM_ARG;
788 else
789 arg = MMC_ERASE_ARG;
790 retry:
791 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
792 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
793 INAND_CMD38_ARG_EXT_CSD,
794 arg == MMC_TRIM_ARG ?
795 INAND_CMD38_ARG_TRIM :
796 INAND_CMD38_ARG_ERASE,
798 if (err)
799 goto out;
801 err = mmc_erase(card, from, nr, arg);
802 out:
803 if (err == -EIO && !mmc_blk_reset(md, card->host, type))
804 goto retry;
805 if (!err)
806 mmc_blk_reset_success(md, type);
807 spin_lock_irq(&md->lock);
808 __blk_end_request(req, err, blk_rq_bytes(req));
809 spin_unlock_irq(&md->lock);
811 return err ? 0 : 1;
814 static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
815 struct request *req)
817 struct mmc_blk_data *md = mq->data;
818 struct mmc_card *card = md->queue.card;
819 unsigned int from, nr, arg, trim_arg, erase_arg;
820 int err = 0, type = MMC_BLK_SECDISCARD;
822 if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
823 err = -EOPNOTSUPP;
824 goto out;
827 from = blk_rq_pos(req);
828 nr = blk_rq_sectors(req);
830 /* The sanitize operation is only supported on cards from eMMC v4.5 onwards */
831 if (mmc_can_sanitize(card)) {
832 erase_arg = MMC_ERASE_ARG;
833 trim_arg = MMC_TRIM_ARG;
834 } else {
835 erase_arg = MMC_SECURE_ERASE_ARG;
836 trim_arg = MMC_SECURE_TRIM1_ARG;
839 if (mmc_erase_group_aligned(card, from, nr))
840 arg = erase_arg;
841 else if (mmc_can_trim(card))
842 arg = trim_arg;
843 else {
844 err = -EINVAL;
845 goto out;
847 retry:
848 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
849 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
850 INAND_CMD38_ARG_EXT_CSD,
851 arg == MMC_SECURE_TRIM1_ARG ?
852 INAND_CMD38_ARG_SECTRIM1 :
853 INAND_CMD38_ARG_SECERASE,
855 if (err)
856 goto out_retry;
859 err = mmc_erase(card, from, nr, arg);
860 if (err == -EIO)
861 goto out_retry;
862 if (err)
863 goto out;
865 if (arg == MMC_SECURE_TRIM1_ARG) {
866 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
867 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
868 INAND_CMD38_ARG_EXT_CSD,
869 INAND_CMD38_ARG_SECTRIM2,
871 if (err)
872 goto out_retry;
875 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
876 if (err == -EIO)
877 goto out_retry;
878 if (err)
879 goto out;
882 if (mmc_can_sanitize(card))
883 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
884 EXT_CSD_SANITIZE_START, 1, 0);
885 out_retry:
886 if (err && !mmc_blk_reset(md, card->host, type))
887 goto retry;
888 if (!err)
889 mmc_blk_reset_success(md, type);
890 out:
891 spin_lock_irq(&md->lock);
892 __blk_end_request(req, err, blk_rq_bytes(req));
893 spin_unlock_irq(&md->lock);
895 return err ? 0 : 1;
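/* Flush the card's internal cache before completing a REQ_FLUSH request. */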
898 static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
900 struct mmc_blk_data *md = mq->data;
901 struct mmc_card *card = md->queue.card;
902 int ret = 0;
904 ret = mmc_flush_cache(card);
905 if (ret)
906 ret = -EIO;
908 spin_lock_irq(&md->lock);
909 __blk_end_request_all(req, ret);
910 spin_unlock_irq(&md->lock);
912 return ret ? 0 : 1;
916 * Reformat current write as a reliable write, supporting
917 * both legacy and the enhanced reliable write MMC cards.
918 * In each transfer we'll handle only as much as a single
919 * reliable write can handle, thus finishing the request in
920 * partial completions.
922 static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
923 struct mmc_card *card,
924 struct request *req)
926 if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
927 /* Legacy mode imposes restrictions on transfers. */
928 if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
929 brq->data.blocks = 1;
931 if (brq->data.blocks > card->ext_csd.rel_sectors)
932 brq->data.blocks = card->ext_csd.rel_sectors;
933 else if (brq->data.blocks < card->ext_csd.rel_sectors)
934 brq->data.blocks = 1;
938 #define CMD_ERRORS \
939 (R1_OUT_OF_RANGE | /* Command argument out of range */ \
940 R1_ADDRESS_ERROR | /* Misaligned address */ \
941 R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
942 R1_WP_VIOLATION | /* Tried to write to protected block */ \
943 R1_CC_ERROR | /* Card controller error */ \
944 R1_ERROR) /* General/unknown error */
946 static int mmc_blk_err_check(struct mmc_card *card,
947 struct mmc_async_req *areq)
949 struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
950 mmc_active);
951 struct mmc_blk_request *brq = &mq_mrq->brq;
952 struct request *req = mq_mrq->req;
953 int ecc_err = 0, gen_err = 0;
956 * sbc.error indicates a problem with the set block count
957 * command. No data will have been transferred.
959 * cmd.error indicates a problem with the r/w command. No
960 * data will have been transferred.
962 * stop.error indicates a problem with the stop command. Data
963 * may have been transferred, or may still be transferring.
965 if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
966 brq->data.error) {
967 switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
968 case ERR_RETRY:
969 return MMC_BLK_RETRY;
970 case ERR_ABORT:
971 return MMC_BLK_ABORT;
972 case ERR_CONTINUE:
973 break;
978 * Check for errors relating to the execution of the
979 * initial command - such as address errors. No data
980 * has been transferred.
982 if (brq->cmd.resp[0] & CMD_ERRORS) {
983 pr_err("%s: r/w command failed, status = %#x\n",
984 req->rq_disk->disk_name, brq->cmd.resp[0]);
985 return MMC_BLK_ABORT;
989 * Everything else is either success, or a data error of some
990 * kind. If it was a write, we may have transitioned to
991 * program mode, in which case we have to wait for it to complete.
993 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
994 u32 status;
996 /* Check stop command response */
997 if (brq->stop.resp[0] & R1_ERROR) {
998 pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
999 req->rq_disk->disk_name, __func__,
1000 brq->stop.resp[0]);
1001 gen_err = 1;
1004 do {
1005 int err = get_card_status(card, &status, 5);
1006 if (err) {
1007 pr_err("%s: error %d requesting status\n",
1008 req->rq_disk->disk_name, err);
1009 return MMC_BLK_CMD_ERR;
1012 if (status & R1_ERROR) {
1013 pr_err("%s: %s: general error sending status command, card status %#x\n",
1014 req->rq_disk->disk_name, __func__,
1015 status);
1016 gen_err = 1;
1020 * Some cards mishandle the status bits,
1021 * so make sure to check both the busy
1022 * indication and the card state.
1024 } while (!(status & R1_READY_FOR_DATA) ||
1025 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
1028 /* if general error occurs, retry the write operation. */
1029 if (gen_err) {
1030 pr_warning("%s: retrying write for general error\n",
1031 req->rq_disk->disk_name);
1032 return MMC_BLK_RETRY;
1035 if (brq->data.error) {
1036 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1037 req->rq_disk->disk_name, brq->data.error,
1038 (unsigned)blk_rq_pos(req),
1039 (unsigned)blk_rq_sectors(req),
1040 brq->cmd.resp[0], brq->stop.resp[0]);
1042 if (rq_data_dir(req) == READ) {
1043 if (ecc_err)
1044 return MMC_BLK_ECC_ERR;
1045 return MMC_BLK_DATA_ERR;
1046 } else {
1047 return MMC_BLK_CMD_ERR;
1051 if (!brq->data.bytes_xfered)
1052 return MMC_BLK_RETRY;
1054 if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1055 return MMC_BLK_PARTIAL;
1057 return MMC_BLK_SUCCESS;
1060 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1061 struct mmc_card *card,
1062 int disable_multi,
1063 struct mmc_queue *mq)
1065 u32 readcmd, writecmd;
1066 struct mmc_blk_request *brq = &mqrq->brq;
1067 struct request *req = mqrq->req;
1068 struct mmc_blk_data *md = mq->data;
1071 * Reliable writes are used to implement Forced Unit Access and
1072 * REQ_META accesses, and are supported only on MMCs.
1074 * XXX: this really needs a good explanation of why REQ_META
1075 * is treated specially.
1077 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
1078 (req->cmd_flags & REQ_META)) &&
1079 (rq_data_dir(req) == WRITE) &&
1080 (md->flags & MMC_BLK_REL_WR);
1082 memset(brq, 0, sizeof(struct mmc_blk_request));
1083 brq->mrq.cmd = &brq->cmd;
1084 brq->mrq.data = &brq->data;
1086 brq->cmd.arg = blk_rq_pos(req);
1087 if (!mmc_card_blockaddr(card))
1088 brq->cmd.arg <<= 9;
1089 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1090 brq->data.blksz = 512;
1091 brq->stop.opcode = MMC_STOP_TRANSMISSION;
1092 brq->stop.arg = 0;
1093 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1094 brq->data.blocks = blk_rq_sectors(req);
1097 * The block layer doesn't support all sector count
1098 * restrictions, so we need to be prepared for too big
1099 * requests.
1101 if (brq->data.blocks > card->host->max_blk_count)
1102 brq->data.blocks = card->host->max_blk_count;
1104 if (brq->data.blocks > 1) {
1106 * After a read error, we redo the request one sector
1107 * at a time in order to accurately determine which
1108 * sectors can be read successfully.
1110 if (disable_multi)
1111 brq->data.blocks = 1;
1113 /* Some controllers can't do multiblock reads due to hw bugs */
1114 if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
1115 rq_data_dir(req) == READ)
1116 brq->data.blocks = 1;
1119 if (brq->data.blocks > 1 || do_rel_wr) {
1120 /* SPI multiblock writes terminate using a special
1121 * token, not a STOP_TRANSMISSION request.
1123 if (!mmc_host_is_spi(card->host) ||
1124 rq_data_dir(req) == READ)
1125 brq->mrq.stop = &brq->stop;
1126 readcmd = MMC_READ_MULTIPLE_BLOCK;
1127 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1128 } else {
1129 brq->mrq.stop = NULL;
1130 readcmd = MMC_READ_SINGLE_BLOCK;
1131 writecmd = MMC_WRITE_BLOCK;
1133 if (rq_data_dir(req) == READ) {
1134 brq->cmd.opcode = readcmd;
1135 brq->data.flags |= MMC_DATA_READ;
1136 } else {
1137 brq->cmd.opcode = writecmd;
1138 brq->data.flags |= MMC_DATA_WRITE;
1141 if (do_rel_wr)
1142 mmc_apply_rel_rw(brq, card, req);
1145 * Pre-defined multi-block transfers are preferable to
1146 * open-ended ones (and necessary for reliable writes).
1147 * However, it is not sufficient to just send CMD23,
1148 * and avoid the final CMD12, as on an error condition
1149 * CMD12 (stop) needs to be sent anyway. This, coupled
1150 * with Auto-CMD23 enhancements provided by some
1151 * hosts, means that the complexity of dealing
1152 * with this is best left to the host. If CMD23 is
1153 * supported by card and host, we'll fill sbc in and let
1154 * the host deal with handling it correctly. This means
1155 * that for hosts that don't expose MMC_CAP_CMD23, no
1156 * change of behavior will be observed.
1158 * N.B.: Some MMC cards experience performance degradation.
1159 * We'll avoid using CMD23-bounded multiblock writes for
1160 * these, while retaining features like reliable writes.
1163 if ((md->flags & MMC_BLK_CMD23) &&
1164 mmc_op_multi(brq->cmd.opcode) &&
1165 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
1166 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1167 brq->sbc.arg = brq->data.blocks |
1168 (do_rel_wr ? (1 << 31) : 0);
1169 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1170 brq->mrq.sbc = &brq->sbc;
1173 mmc_set_data_timeout(&brq->data, card);
1175 brq->data.sg = mqrq->sg;
1176 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1179 * Adjust the sg list so it is the same size as the
1180 * request.
1182 if (brq->data.blocks != blk_rq_sectors(req)) {
1183 int i, data_size = brq->data.blocks << 9;
1184 struct scatterlist *sg;
1186 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1187 data_size -= sg->length;
1188 if (data_size <= 0) {
1189 sg->length += data_size;
1190 i++;
1191 break;
1194 brq->data.sg_len = i;
1197 mqrq->mmc_active.mrq = &brq->mrq;
1198 mqrq->mmc_active.err_check = mmc_blk_err_check;
1200 mmc_queue_bounce_pre(mqrq);
1203 static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1204 struct mmc_blk_request *brq, struct request *req,
1205 int ret)
1208 * If this is an SD card and we're writing, we can first
1209 * mark the known good sectors as ok.
1211 * If the card is not SD, we can still acknowledge the written sectors
1212 * as reported by the controller (which might be less than
1213 * the real number of written sectors, but never more).
1215 if (mmc_card_sd(card)) {
1216 u32 blocks;
1218 blocks = mmc_sd_num_wr_blocks(card);
1219 if (blocks != (u32)-1) {
1220 spin_lock_irq(&md->lock);
1221 ret = __blk_end_request(req, 0, blocks << 9);
1222 spin_unlock_irq(&md->lock);
1224 } else {
1225 spin_lock_irq(&md->lock);
1226 ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
1227 spin_unlock_irq(&md->lock);
1229 return ret;
1232 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
1234 struct mmc_blk_data *md = mq->data;
1235 struct mmc_card *card = md->queue.card;
1236 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
1237 int ret = 1, disable_multi = 0, retry = 0, type;
1238 enum mmc_blk_status status;
1239 struct mmc_queue_req *mq_rq;
1240 struct request *req;
1241 struct mmc_async_req *areq;
1243 if (!rqc && !mq->mqrq_prev->req)
1244 return 0;
1246 do {
1247 if (rqc) {
1248 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1249 areq = &mq->mqrq_cur->mmc_active;
1250 } else
1251 areq = NULL;
1252 areq = mmc_start_req(card->host, areq, (int *) &status);
1253 if (!areq)
1254 return 0;
1256 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
1257 brq = &mq_rq->brq;
1258 req = mq_rq->req;
1259 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
1260 mmc_queue_bounce_post(mq_rq);
1262 switch (status) {
1263 case MMC_BLK_SUCCESS:
1264 case MMC_BLK_PARTIAL:
1266 * A block was successfully transferred.
1268 mmc_blk_reset_success(md, type);
1269 spin_lock_irq(&md->lock);
1270 ret = __blk_end_request(req, 0,
1271 brq->data.bytes_xfered);
1272 spin_unlock_irq(&md->lock);
1274 * If the blk_end_request function returns non-zero even
1275 * though all data has been transferred and no errors
1276 * were returned by the host controller, it's a bug.
1278 if (status == MMC_BLK_SUCCESS && ret) {
1279 pr_err("%s BUG rq_tot %d d_xfer %d\n",
1280 __func__, blk_rq_bytes(req),
1281 brq->data.bytes_xfered);
1282 rqc = NULL;
1283 goto cmd_abort;
1285 break;
1286 case MMC_BLK_CMD_ERR:
1287 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
1288 if (!mmc_blk_reset(md, card->host, type))
1289 break;
1290 goto cmd_abort;
1291 case MMC_BLK_RETRY:
1292 if (retry++ < 5)
1293 break;
1294 /* Fall through */
1295 case MMC_BLK_ABORT:
1296 if (!mmc_blk_reset(md, card->host, type))
1297 break;
1298 goto cmd_abort;
1299 case MMC_BLK_DATA_ERR: {
1300 int err;
1302 err = mmc_blk_reset(md, card->host, type);
1303 if (!err)
1304 break;
1305 if (err == -ENODEV)
1306 goto cmd_abort;
1307 /* Fall through */
1309 case MMC_BLK_ECC_ERR:
1310 if (brq->data.blocks > 1) {
1311 /* Redo read one sector at a time */
1312 pr_warning("%s: retrying using single block read\n",
1313 req->rq_disk->disk_name);
1314 disable_multi = 1;
1315 break;
1318 * After an error, we redo I/O one sector at a
1319 * time, so we only reach here after trying to
1320 * read a single sector.
1322 spin_lock_irq(&md->lock);
1323 ret = __blk_end_request(req, -EIO,
1324 brq->data.blksz);
1325 spin_unlock_irq(&md->lock);
1326 if (!ret)
1327 goto start_new_req;
1328 break;
1331 if (ret) {
1333 * In case of an incomplete request,
1334 * prepare it again and resend.
1336 mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
1337 mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
1339 } while (ret);
1341 return 1;
1343 cmd_abort:
1344 spin_lock_irq(&md->lock);
1345 while (ret)
1346 ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
1347 spin_unlock_irq(&md->lock);
1349 start_new_req:
1350 if (rqc) {
1351 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1352 mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
1355 return 0;
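/*
 * Top-level request dispatcher: claim the host for the first request in
 * a burst, switch to the right card partition, then hand the request to
 * the discard, secure-discard, flush or read/write path.
 */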
1358 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1360 int ret;
1361 struct mmc_blk_data *md = mq->data;
1362 struct mmc_card *card = md->queue.card;
1364 if (req && !mq->mqrq_prev->req)
1365 /* claim host only for the first request */
1366 mmc_claim_host(card->host);
1368 ret = mmc_blk_part_switch(card, md);
1369 if (ret) {
1370 if (req) {
1371 spin_lock_irq(&md->lock);
1372 __blk_end_request_all(req, -EIO);
1373 spin_unlock_irq(&md->lock);
1375 ret = 0;
1376 goto out;
1379 if (req && req->cmd_flags & REQ_DISCARD) {
1380 /* complete ongoing async transfer before issuing discard */
1381 if (card->host->areq)
1382 mmc_blk_issue_rw_rq(mq, NULL);
1383 if (req->cmd_flags & REQ_SECURE &&
1384 !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
1385 ret = mmc_blk_issue_secdiscard_rq(mq, req);
1386 else
1387 ret = mmc_blk_issue_discard_rq(mq, req);
1388 } else if (req && req->cmd_flags & REQ_FLUSH) {
1389 /* complete ongoing async transfer before issuing flush */
1390 if (card->host->areq)
1391 mmc_blk_issue_rw_rq(mq, NULL);
1392 ret = mmc_blk_issue_flush(mq, req);
1393 } else {
1394 ret = mmc_blk_issue_rw_rq(mq, req);
1397 out:
1398 if (!req)
1399 /* release host only when there are no more requests */
1400 mmc_release_host(card->host);
1401 return ret;
1404 static inline int mmc_blk_readonly(struct mmc_card *card)
1406 return mmc_card_readonly(card) ||
1407 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
1410 static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
1411 struct device *parent,
1412 sector_t size,
1413 bool default_ro,
1414 const char *subname)
1416 struct mmc_blk_data *md;
1417 int devidx, ret;
1419 devidx = find_first_zero_bit(dev_use, max_devices);
1420 if (devidx >= max_devices)
1421 return ERR_PTR(-ENOSPC);
1422 __set_bit(devidx, dev_use);
1424 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
1425 if (!md) {
1426 ret = -ENOMEM;
1427 goto out;
1431 * !subname implies we are creating the main mmc_blk_data that will be
1432 * associated with the mmc_card via mmc_set_drvdata. Due to device
1433 * partitions, devidx will no longer coincide with a per-physical-card
1434 * index, so we keep track of a separate name index.
1436 if (!subname) {
1437 md->name_idx = find_first_zero_bit(name_use, max_devices);
1438 __set_bit(md->name_idx, name_use);
1440 else
1441 md->name_idx = ((struct mmc_blk_data *)
1442 dev_to_disk(parent)->private_data)->name_idx;
1445 * Set the read-only status based on the supported commands
1446 * and the write protect switch.
1448 md->read_only = mmc_blk_readonly(card);
1450 md->disk = alloc_disk(perdev_minors);
1451 if (md->disk == NULL) {
1452 ret = -ENOMEM;
1453 goto err_kfree;
1456 spin_lock_init(&md->lock);
1457 INIT_LIST_HEAD(&md->part);
1458 md->usage = 1;
1460 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
1461 if (ret)
1462 goto err_putdisk;
1464 md->queue.issue_fn = mmc_blk_issue_rq;
1465 md->queue.data = md;
1467 md->disk->major = MMC_BLOCK_MAJOR;
1468 md->disk->first_minor = devidx * perdev_minors;
1469 md->disk->fops = &mmc_bdops;
1470 md->disk->private_data = md;
1471 md->disk->queue = md->queue.queue;
1472 md->disk->driverfs_dev = parent;
1473 set_disk_ro(md->disk, md->read_only || default_ro);
1476 * As discussed on lkml, GENHD_FL_REMOVABLE should:
1478 * - be set for removable media with permanent block devices
1479 * - be unset for removable block devices with permanent media
1481 * Since MMC block devices clearly fall under the second
1482 * case, we do not set GENHD_FL_REMOVABLE. Userspace
1483 * should use the block device creation/destruction hotplug
1484 * messages to tell when the card is present.
1487 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
1488 "mmcblk%d%s", md->name_idx, subname ? subname : "");
1490 blk_queue_logical_block_size(md->queue.queue, 512);
1491 set_capacity(md->disk, size);
1493 if (mmc_host_cmd23(card->host)) {
1494 if (mmc_card_mmc(card) ||
1495 (mmc_card_sd(card) &&
1496 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
1497 md->flags |= MMC_BLK_CMD23;
1500 if (mmc_card_mmc(card) &&
1501 md->flags & MMC_BLK_CMD23 &&
1502 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
1503 card->ext_csd.rel_sectors)) {
1504 md->flags |= MMC_BLK_REL_WR;
1505 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
1508 return md;
1510 err_putdisk:
1511 put_disk(md->disk);
1512 err_kfree:
1513 kfree(md);
1514 out:
1515 return ERR_PTR(ret);
1518 static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
1520 sector_t size;
1521 struct mmc_blk_data *md;
1523 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
1525 * The EXT_CSD sector count is in number of 512 byte
1526 * sectors.
1528 size = card->ext_csd.sectors;
1529 } else {
1531 * The CSD capacity field is in units of read_blkbits.
1532 * set_capacity takes units of 512 bytes.
1534 size = card->csd.capacity << (card->csd.read_blkbits - 9);
1537 md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
1538 return md;
1541 static int mmc_blk_alloc_part(struct mmc_card *card,
1542 struct mmc_blk_data *md,
1543 unsigned int part_type,
1544 sector_t size,
1545 bool default_ro,
1546 const char *subname)
1548 char cap_str[10];
1549 struct mmc_blk_data *part_md;
1551 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
1552 subname);
1553 if (IS_ERR(part_md))
1554 return PTR_ERR(part_md);
1555 part_md->part_type = part_type;
1556 list_add(&part_md->part, &md->part);
1558 string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
1559 cap_str, sizeof(cap_str));
1560 pr_info("%s: %s %s partition %u %s\n",
1561 part_md->disk->disk_name, mmc_card_id(card),
1562 mmc_card_name(card), part_md->part_type, cap_str);
1563 return 0;
1566 /* MMC Physical partitions consist of two boot partitions and
1567 * up to four general purpose partitions.
1568 * For each partition enabled in EXT_CSD a block device will be allocated
1569 * to provide access to the partition.
1572 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
1574 int idx, ret = 0;
1576 if (!mmc_card_mmc(card))
1577 return 0;
1579 for (idx = 0; idx < card->nr_parts; idx++) {
1580 if (card->part[idx].size) {
1581 ret = mmc_blk_alloc_part(card, md,
1582 card->part[idx].part_cfg,
1583 card->part[idx].size >> 9,
1584 card->part[idx].force_ro,
1585 card->part[idx].name);
1586 if (ret)
1587 return ret;
1591 return ret;
1594 static int
1595 mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
1597 int err;
1599 mmc_claim_host(card->host);
1600 err = mmc_set_blocklen(card, 512);
1601 mmc_release_host(card->host);
1603 if (err) {
1604 pr_err("%s: unable to set block size to 512: %d\n",
1605 md->disk->disk_name, err);
1606 return -EINVAL;
1609 return 0;
1612 static void mmc_blk_remove_req(struct mmc_blk_data *md)
1614 if (md) {
1615 if (md->disk->flags & GENHD_FL_UP) {
1616 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
1618 /* Stop new requests from getting into the queue */
1619 del_gendisk(md->disk);
1622 /* Then flush out any already in there */
1623 mmc_cleanup_queue(&md->queue);
1624 mmc_blk_put(md);
1628 static void mmc_blk_remove_parts(struct mmc_card *card,
1629 struct mmc_blk_data *md)
1631 struct list_head *pos, *q;
1632 struct mmc_blk_data *part_md;
1634 __clear_bit(md->name_idx, name_use);
1635 list_for_each_safe(pos, q, &md->part) {
1636 part_md = list_entry(pos, struct mmc_blk_data, part);
1637 list_del(pos);
1638 mmc_blk_remove_req(part_md);
1642 static int mmc_add_disk(struct mmc_blk_data *md)
1644 int ret;
1646 add_disk(md->disk);
1647 md->force_ro.show = force_ro_show;
1648 md->force_ro.store = force_ro_store;
1649 sysfs_attr_init(&md->force_ro.attr);
1650 md->force_ro.attr.name = "force_ro";
1651 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
1652 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
1653 if (ret)
1654 del_gendisk(md->disk);
1656 return ret;
1659 #define CID_MANFID_SAMSUNG 0x15
1661 static const struct mmc_fixup blk_fixups[] =
1663 MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1664 MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1665 MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1666 MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1667 MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1670 * Some MMC cards experience performance degradation with CMD23
1671 * instead of CMD12-bounded multiblock transfers. For now we'll
1672 * blacklist what's bad...
1673 * - Certain Toshiba cards.
1675 * N.B. This doesn't affect SD cards.
1677 MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
1678 MMC_QUIRK_BLK_NO_CMD23),
1679 MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
1680 MMC_QUIRK_BLK_NO_CMD23),
1681 MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
1682 MMC_QUIRK_BLK_NO_CMD23),
1685 * Some Micron MMC cards need a longer data read timeout than
1686 * indicated in CSD.
1688 MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
1689 MMC_QUIRK_LONG_READ_TIME),
1692 * On these Samsung MoviNAND parts, performing secure erase or
1693 * secure trim can result in unrecoverable corruption due to a
1694 * firmware bug.
1696 MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1697 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1698 MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1699 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1700 MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1701 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1702 MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1703 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1704 MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1705 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1706 MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1707 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1708 MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1709 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1710 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
1711 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
1713 END_FIXUP
1716 static int mmc_blk_probe(struct mmc_card *card)
1718 struct mmc_blk_data *md, *part_md;
1719 int err;
1720 char cap_str[10];
1723 * Check that the card supports the command class(es) we need.
1725 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
1726 return -ENODEV;
1728 md = mmc_blk_alloc(card);
1729 if (IS_ERR(md))
1730 return PTR_ERR(md);
1732 err = mmc_blk_set_blksize(md, card);
1733 if (err)
1734 goto out;
1736 string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
1737 cap_str, sizeof(cap_str));
1738 pr_info("%s: %s %s %s %s\n",
1739 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
1740 cap_str, md->read_only ? "(ro)" : "");
1742 if (mmc_blk_alloc_parts(card, md))
1743 goto out;
1745 mmc_set_drvdata(card, md);
1746 mmc_fixup_device(card, blk_fixups);
1748 if (mmc_add_disk(md))
1749 goto out;
1751 list_for_each_entry(part_md, &md->part, part) {
1752 if (mmc_add_disk(part_md))
1753 goto out;
1755 return 0;
1757 out:
1758 mmc_blk_remove_parts(card, md);
1759 mmc_blk_remove_req(md);
1760 return err;
1763 static void mmc_blk_remove(struct mmc_card *card)
1765 struct mmc_blk_data *md = mmc_get_drvdata(card);
1767 mmc_blk_remove_parts(card, md);
1768 mmc_claim_host(card->host);
1769 mmc_blk_part_switch(card, md);
1770 mmc_release_host(card->host);
1771 mmc_blk_remove_req(md);
1772 mmc_set_drvdata(card, NULL);
1775 #ifdef CONFIG_PM
1776 static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
1778 struct mmc_blk_data *part_md;
1779 struct mmc_blk_data *md = mmc_get_drvdata(card);
1781 if (md) {
1782 mmc_queue_suspend(&md->queue);
1783 list_for_each_entry(part_md, &md->part, part) {
1784 mmc_queue_suspend(&part_md->queue);
1787 return 0;
1790 static int mmc_blk_resume(struct mmc_card *card)
1792 struct mmc_blk_data *part_md;
1793 struct mmc_blk_data *md = mmc_get_drvdata(card);
1795 if (md) {
1796 mmc_blk_set_blksize(md, card);
1799 * Resume involves the card going into idle state,
1800 * so the current partition is always the main one.
1802 md->part_curr = md->part_type;
1803 mmc_queue_resume(&md->queue);
1804 list_for_each_entry(part_md, &md->part, part) {
1805 mmc_queue_resume(&part_md->queue);
1808 return 0;
1810 #else
1811 #define mmc_blk_suspend NULL
1812 #define mmc_blk_resume NULL
1813 #endif
1815 static struct mmc_driver mmc_driver = {
1816 .drv = {
1817 .name = "mmcblk",
1819 .probe = mmc_blk_probe,
1820 .remove = mmc_blk_remove,
1821 .suspend = mmc_blk_suspend,
1822 .resume = mmc_blk_resume,
1825 static int __init mmc_blk_init(void)
1827 int res;
1829 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
1830 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
1832 max_devices = 256 / perdev_minors;
1834 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
1835 if (res)
1836 goto out;
1838 res = mmc_register_driver(&mmc_driver);
1839 if (res)
1840 goto out2;
1842 return 0;
1843 out2:
1844 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
1845 out:
1846 return res;
1849 static void __exit mmc_blk_exit(void)
1851 mmc_unregister_driver(&mmc_driver);
1852 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
1855 module_init(mmc_blk_init);
1856 module_exit(mmc_blk_exit);
1858 MODULE_LICENSE("GPL");
1859 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");