// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS		(10 * 60 * 1000) /* 10 min */
#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120 s */
#define MMC_CACHE_FLUSH_TIMEOUT_MS	(30 * 1000) /* 30 s */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);

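/*
 * Example (illustrative sketch, not part of the original file): a caller can
 * use mmc_send_status() to check whether the card is back in the transfer
 * state. The helper name example_card_in_tran() is hypothetical.
 *
 *	static bool example_card_in_tran(struct mmc_card *card)
 *	{
 *		u32 status;
 *
 *		if (mmc_send_status(card, &status))
 *			return false;
 *		return R1_CURRENT_STATE(status) == R1_STATE_TRAN;
 *	}
 *
 * For SPI hosts the response is an SPI-format status word instead, as the
 * comment in __mmc_send_status() warns, so R1_CURRENT_STATE() does not apply.
 */
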
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

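/*
 * Example (hedged sketch of assumed usage): CMD7 with a zero argument moves
 * the currently selected card back to stand-by, e.g. around a power-saving
 * transition. Error handling is elided and the context is hypothetical.
 *
 *	err = mmc_deselect_cards(host);	     // no card selected afterwards
 *	if (!err)
 *		err = mmc_select_card(card); // re-select for data transfer
 */
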
/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until
		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive mode after mmc_init_card() issued CMD0 when
		 * the eMMC device is busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

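/*
 * Example (sketch of a typical probe sequence; the real logic lives in the
 * core's attach/init paths, this is only an illustration): CMD1 is first
 * issued with a zero OCR to query the card, then with the negotiated voltage
 * window, OR'ed with bit 30 to request sector addressing.
 *
 *	u32 ocr;
 *
 *	mmc_go_idle(host);			// CMD0
 *	err = mmc_send_op_cond(host, 0, &ocr);	// query the supported OCR
 *	if (!err)
 *		err = mmc_send_op_cond(host, ocr | (1 << 30), NULL);
 */
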
int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller is required to provide buf as a DMA-capable buffer or
 * an on-stack buffer (with some overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	__be32 *csd_tmp;

	csd_tmp = kzalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_csd(card, csd);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				MMC_SEND_CSD);
}

static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	__be32 *cid_tmp;

	cid_tmp = kzalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cid(host, cid);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);

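/*
 * Example (sketch only): reading a single EXT_CSD byte. The caller owns the
 * returned buffer and must free it. EXT_CSD_REV is a real register index;
 * the surrounding code is hypothetical.
 *
 *	u8 *ext_csd;
 *
 *	err = mmc_get_ext_csd(card, &ext_csd);
 *	if (!err) {
 *		pr_debug("EXT_CSD revision: %u\n", ext_csd[EXT_CSD_REV]);
 *		kfree(ext_csd);
 *	}
 */
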
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

int mmc_switch_status(struct mmc_card *card)
{
	return __mmc_switch_status(card, true);
}

static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	u32 status = 0;
	bool expired = false;
	bool busy = false;

	/*
	 * If polling via CMD13 is not allowed and the host is not capable of
	 * polling via ->card_busy(), rely on waiting out the stated timeout.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		if (host->ops->card_busy) {
			busy = host->ops->card_busy(host);
		} else {
			err = mmc_send_status(card, &status);
			if (retry_crc_err && err == -EILSEQ) {
				busy = true;
			} else if (err) {
				return err;
			} else {
				err = mmc_switch_status_error(host, status);
				if (err)
					return err;
				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
			}
		}

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (busy);

	return 0;
}

/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @use_busy_signal: use the busy signal as response type
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry on CRC errors when polling with CMD13 for busy
 *
 * Modifies the EXT_CSD register for the selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool use_busy_signal, bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = use_busy_signal;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B.
	 */
	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/* If SPI or HW busy detection was used above, we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	/* Switch to the new timing before checking the switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			true, true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);

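/*
 * Example (illustrative, not a verbatim core call site): a single EXT_CSD
 * byte write via CMD6, here enabling the device cache. The register and
 * timeout choices mirror what the init code does, but are shown only as a
 * sketch.
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CACHE_CTRL,
 *			 1, card->ext_csd.generic_cmd6_time);
 */
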
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally takes
	 * fewer than 40 executions of CMD19, and the timeout value should
	 * be shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);

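/*
 * Example (hedged sketch of a host driver's tuning loop; names prefixed
 * example_ are hypothetical): a driver typically steps through its sample
 * phases and uses mmc_send_tuning() to test each one, then picks a phase
 * from the largest window of passing samples.
 *
 *	for (phase = 0; phase < EXAMPLE_NUM_PHASES; phase++) {
 *		example_set_sample_phase(host, phase);
 *		if (!mmc_send_tuning(host, opcode, NULL))
 *			example_mark_phase_good(phase);
 *	}
 */
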
int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification states that CMD12 can be used to stop a
	 * tuning command, but the SD specification does not, so do nothing
	 * unless it is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout, i.e. 150 ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);

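/*
 * Example (assumed usage in a host driver): if a tuning command fails in a
 * way that may leave the card busy, CMD12 can be sent to unblock it before
 * retrying. The surrounding context is hypothetical.
 *
 *	err = mmc_send_tuning(host, MMC_SEND_TUNING_BLOCK_HS200, &cmd_error);
 *	if (err)
 *		mmc_abort_tuning(host, MMC_SEND_TUNING_BLOCK_HS200);
 */
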
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

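/*
 * Example (sketch; the real caller is the bus-width selection logic in the
 * core's init path): after switching the host and card to a wider bus, the
 * bus test verifies that all data lines actually toggle.
 *
 *	mmc_set_bus_width(card->host, MMC_BUS_WIDTH_8);
 *	err = mmc_bus_test(card, MMC_BUS_WIDTH_8);
 *	if (err)
 *		// fall back to a narrower bus width
 */
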
static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	unsigned int opcode;
	int err;

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}

/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issue a High Priority Interrupt and poll the card status until it
 * leaves the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	return err;
}

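/*
 * Example (illustrative only): HPI is meant to preempt a long foreground
 * operation, e.g. so an urgent request can be issued sooner. The surrounding
 * context is hypothetical.
 *
 *	err = mmc_interrupt_hpi(card);
 *	if (!err)
 *		// card left the programming state; issue the urgent request
 */
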
int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 * mmc_run_bkops - Run BKOPS for supported cards
 * @card: MMC card to run BKOPS for
 *
 * Run background operations synchronously for cards having manual BKOPS
 * enabled, when the card reports an urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. In the future we may consider starting BKOPS for
	 * less urgent levels from an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	if (err)
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0) &&
			(card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1,
				 MMC_CACHE_FLUSH_TIMEOUT_MS);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

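/*
 * Example (sketch; the core invokes this from its suspend/shutdown paths):
 * flushing the volatile cache before power-down so that queued writes reach
 * non-volatile storage.
 *
 *	err = mmc_flush_cache(card);
 *	if (err)
 *		// abort the power-down and report the failure
 */
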
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);

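/*
 * Example (hedged sketch): some operations must not run while command
 * queueing is enabled, so callers bracket them with a disable/enable pair,
 * as the block layer does around RPMB accesses.
 *
 *	err = mmc_cmdq_disable(card);
 *	if (!err) {
 *		// ... issue the non-CMDQ operation ...
 *		err = mmc_cmdq_enable(card);
 *	}
 */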