// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/core/mmc_ops.h
 *
 *  Copyright 2006-2007 Pierre Ossman
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"
#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
#define MMC_CACHE_FLUSH_TIMEOUT_MS	(30 * 1000) /* 30s */
#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);
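
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * caller reads the R1 status word and decodes the current card state with
 * R1_CURRENT_STATE(). The function name below is hypothetical.
 */
static int __maybe_unused example_card_state(struct mmc_card *card)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (err)
		return err;

	/* R1_CURRENT_STATE() extracts the state field (bits 12:9). */
	return R1_CURRENT_STATE(status);
}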
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}
/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
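
/*
 * Example (illustrative sketch, not part of the original file): CMD4
 * carries the 16 bit DSR in the upper half of the 32 bit argument, with
 * the lower half stuffed with ones, exactly as mmc_set_dsr() does above.
 * The helper name is hypothetical.
 */
static u32 __maybe_unused example_dsr_arg(u16 dsr)
{
	/* e.g. a DSR of 0x0404 yields the argument 0x0404ffff */
	return ((u32)dsr << 16) | 0xffff;
}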
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until
		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive mode after mmc_init_card() issued CMD0 when
		 * the eMMC device is busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
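
/*
 * Example (illustrative sketch, not part of the original file): the attach
 * path typically issues CMD1 once with a zero argument to read the card's
 * OCR, then repeats CMD1 with the negotiated voltage window (plus BIT(30)
 * to request sector addressing) until the card reports ready. The function
 * name is hypothetical.
 */
static int __maybe_unused example_negotiate_ocr(struct mmc_host *host)
{
	u32 ocr;
	int err;

	/* With a zero argument, CMD1 only queries the OCR. */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (err)
		return err;

	/* A real caller would mask ocr against the host's voltage caps. */
	return mmc_send_op_cond(host, ocr | BIT(30), NULL);
}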
int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}
static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}
/*
 * NOTE: void *buf, the caller is required to pass a DMA-capable buffer or
 * an on-stack buffer (with some overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	__be32 *csd_tmp;

	csd_tmp = kzalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}
int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_csd(card, csd);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				MMC_SEND_CSD);
}
static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	__be32 *cid_tmp;

	cid_tmp = kzalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}
int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cid(host, cid);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
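
/*
 * Example (illustrative sketch, not part of the original file):
 * mmc_get_ext_csd() allocates the 512 byte EXT_CSD block on behalf of the
 * caller, who owns it afterwards and must kfree() it. The function name
 * below is hypothetical.
 */
static int __maybe_unused example_read_life_time(struct mmc_card *card)
{
	u8 *ext_csd;
	int val;
	int err;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	/* DEVICE_LIFE_TIME_EST_TYP_A, EXT_CSD byte 268 (eMMC 5.0+) */
	val = ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];

	kfree(ext_csd);
	return val;
}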
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}
int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}
static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}
/* Caller must hold re-tuning */
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}
static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
			   enum mmc_busy_cmd busy_cmd, bool *busy)
{
	struct mmc_host *host = card->host;
	u32 status = 0;
	int err;

	if (host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(card, &status);
	if (retry_crc_err && err == -EILSEQ) {
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	switch (busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(card->host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}
static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			       bool send_status, bool retry_crc_err,
			       enum mmc_busy_cmd busy_cmd)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	unsigned int udelay = 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	/*
	 * When not allowed to poll with CMD13, and the host is not capable of
	 * polling via ->card_busy(), rely on waiting out the stated timeout.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = mmc_busy_status(card, retry_crc_err, busy_cmd, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
			       mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}

int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		      enum mmc_busy_cmd busy_cmd)
{
	return __mmc_poll_for_busy(card, timeout_ms, true, false, busy_cmd);
}
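
/*
 * Example (illustrative sketch, not part of the original file): the erase
 * path waits for the card to leave the programming state after CMD38 by
 * polling with CMD13 in the same way. The function name is hypothetical,
 * assuming busy_timeout_ms was derived from the erase timeout.
 */
static int __maybe_unused example_wait_after_erase(struct mmc_card *card,
						   unsigned int busy_timeout_ms)
{
	return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_ERASE);
}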
/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry on CRC errors when polling with CMD13 for busy
 *
 * Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = true;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B. Note, some hosts require R1B, which also means
	 * they are on their own when it comes to dealing with the busy timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* If SPI, or if HW busy detection was used above, we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
	    mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = __mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err,
				  MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before checking switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
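
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * CMD6 user switches a single EXT_CSD byte and lets mmc_switch() poll
 * until the card leaves the programming state. The function name is
 * hypothetical; a real caller would also reprogram the host's bus width.
 */
static int __maybe_unused example_set_bus_width_4(struct mmc_card *card)
{
	return mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			  EXT_CSD_BUS_WIDTH_4,
			  card->ext_csd.generic_cmd6_time);
}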
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally
	 * completes in fewer than 40 executions of CMD19, and the timeout
	 * value should be shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
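
/*
 * Example (illustrative sketch, not part of the original file): a host
 * driver's ->execute_tuning() callback typically steps through its sample
 * phases and uses mmc_send_tuning() to judge each one. The function name
 * and the fixed 16-phase sweep are hypothetical.
 */
static int __maybe_unused example_execute_tuning(struct mmc_host *host,
						 u32 opcode)
{
	int phase, err;

	for (phase = 0; phase < 16; phase++) {
		/*
		 * A real driver would program its sample phase here through
		 * controller-specific registers before each attempt.
		 */
		err = mmc_send_tuning(host, opcode, NULL);
		if (!err)
			return 0;	/* pattern came back intact */
		if (err == -EILSEQ)
			continue;	/* CRC error: try the next phase */
		mmc_abort_tuning(host, opcode);
		return err;
	}

	return -EIO;
}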
int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification says that CMD12 can be used to stop a tuning
	 * command, but the SD specification does not, so do nothing unless it
	 * is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout, i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	return err;
}
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
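
/*
 * Example (illustrative sketch, not part of the original file): during
 * init, a host advertising MMC_CAP_BUS_WIDTH_TEST can verify a freshly
 * switched bus width and fall back if the loopback pattern does not
 * survive. The function name is hypothetical, and a real caller would
 * also reprogram the host's ios bus width between the two steps.
 */
static int __maybe_unused example_verify_bus_width(struct mmc_card *card)
{
	int err;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			 EXT_CSD_BUS_WIDTH_4,
			 card->ext_csd.generic_cmd6_time);
	if (err)
		return err;

	return mmc_bus_test(card, MMC_BUS_WIDTH_4);
}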
static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = true;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	cmd.arg = card->rca << 16 | 1;

	/*
	 * Make sure the host's max_busy_timeout fits the needed timeout for
	 * HPI. In case it doesn't, let's instruct the host to avoid HW busy
	 * detection, by using a R1 response instead of R1B.
	 */
	if (host->max_busy_timeout && busy_timeout_ms > host->max_busy_timeout)
		use_r1b_resp = false;

	if (cmd.opcode == MMC_STOP_TRANSMISSION && use_r1b_resp) {
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = busy_timeout_ms;
	} else {
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		use_r1b_resp = false;
	}

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_HPI);
}
/**
 * mmc_interrupt_hpi - issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issues a High Priority Interrupt, then polls the card status until the
 * card leaves the programming state.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card);
out:
	return err;
}
int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}
static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}
/**
 * mmc_run_bkops - Run BKOPS for supported cards
 * @card: MMC card to run BKOPS for
 *
 * Run background operations synchronously for cards having manual BKOPS
 * enabled and that report an urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute it
	 * synchronously. In the future we may consider starting BKOPS for
	 * less urgent levels from an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	if (err)
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);
/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1,
				 MMC_CACHE_FLUSH_TIMEOUT_MS);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
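
/*
 * Example (illustrative sketch, not part of the original file): callers
 * flush the volatile cache before power-down style operations, holding
 * the host claim around the CMD6. The function name is hypothetical.
 */
static int __maybe_unused example_flush_before_suspend(struct mmc_card *card)
{
	int err;

	mmc_claim_host(card->host);
	err = mmc_flush_cache(card);
	mmc_release_host(card->host);

	return err;
}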
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
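
/*
 * Example (illustrative sketch, not part of the original file): command
 * queuing must be switched off while issuing non-queueable operations
 * (e.g. RPMB access or ioctl passthrough) and re-enabled afterwards. The
 * function name is hypothetical.
 */
static int __maybe_unused example_without_cmdq(struct mmc_card *card)
{
	int err;

	err = mmc_cmdq_disable(card);
	if (err)
		return err;

	/* ... issue legacy, non-queued commands here ... */

	return mmc_cmdq_enable(card);
}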
int mmc_sanitize(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	mmc_retune_hold(host);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			 1, MMC_SANITIZE_TIMEOUT_MS);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));

	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);