// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/host/sdhci_uhs2.c - Secure Digital Host Controller
 *  Interface driver
 *
 *  Copyright (C) 2014 Intel Corp, All Rights Reserved.
 *  Copyright (C) 2020 Genesys Logic, Inc.
 *  Authors: Ben Chuang <ben.chuang@genesyslogic.com.tw>
 *  Copyright (C) 2020 Linaro Limited
 *  Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/bitfield.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>

#include "sdhci.h"
#include "sdhci-uhs2.h"
#define DRIVER_NAME "sdhci_uhs2"
#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ## x)
#define SDHCI_UHS2_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define UHS2_RESET_TIMEOUT_100MS		100000
#define UHS2_CHECK_DORMANT_TIMEOUT_100MS	100000
#define UHS2_INTERFACE_DETECT_TIMEOUT_100MS	100000
#define UHS2_LANE_SYNC_TIMEOUT_150MS		150000

#define UHS2_ARG_IOADR_MASK 0xfff
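/*
 * Note: the *_TIMEOUT_* values above are microsecond budgets handed to
 * read_poll_timeout()/read_poll_timeout_atomic() below (100000 us = 100 ms,
 * 150000 us = 150 ms), and UHS2_ARG_IOADR_MASK extracts the 12-bit IOADR
 * field from a UHS-II command argument (see uhs2_dev_cmd() below).
 */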
void sdhci_uhs2_dump_regs(struct sdhci_host *host)
{
	if (!(mmc_card_uhs2(host->mmc)))
		return;

	SDHCI_UHS2_DUMP("==================== UHS2 ==================\n");
	SDHCI_UHS2_DUMP("Blk Size: 0x%08x | Blk Cnt:  0x%08x\n",
			sdhci_readw(host, SDHCI_UHS2_BLOCK_SIZE),
			sdhci_readl(host, SDHCI_UHS2_BLOCK_COUNT));
	SDHCI_UHS2_DUMP("Cmd:      0x%08x | Trn mode: 0x%08x\n",
			sdhci_readw(host, SDHCI_UHS2_CMD),
			sdhci_readw(host, SDHCI_UHS2_TRANS_MODE));
	SDHCI_UHS2_DUMP("Int Stat: 0x%08x | Dev Sel : 0x%08x\n",
			sdhci_readw(host, SDHCI_UHS2_DEV_INT_STATUS),
			sdhci_readb(host, SDHCI_UHS2_DEV_SELECT));
	SDHCI_UHS2_DUMP("Dev Int Code: 0x%08x\n",
			sdhci_readb(host, SDHCI_UHS2_DEV_INT_CODE));
	SDHCI_UHS2_DUMP("Reset:    0x%08x | Timer:    0x%08x\n",
			sdhci_readw(host, SDHCI_UHS2_SW_RESET),
			sdhci_readw(host, SDHCI_UHS2_TIMER_CTRL));
	SDHCI_UHS2_DUMP("ErrInt:   0x%08x | ErrIntEn: 0x%08x\n",
			sdhci_readl(host, SDHCI_UHS2_INT_STATUS),
			sdhci_readl(host, SDHCI_UHS2_INT_STATUS_ENABLE));
	SDHCI_UHS2_DUMP("ErrSigEn: 0x%08x\n",
			sdhci_readl(host, SDHCI_UHS2_INT_SIGNAL_ENABLE));
}
EXPORT_SYMBOL_GPL(sdhci_uhs2_dump_regs);
/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/
static inline u16 uhs2_dev_cmd(struct mmc_command *cmd)
{
	return be16_to_cpu((__force __be16)cmd->uhs2_cmd->arg) & UHS2_ARG_IOADR_MASK;
}
static inline int mmc_opt_regulator_set_ocr(struct mmc_host *mmc,
					    struct regulator *supply,
					    unsigned short vdd_bit)
{
	return IS_ERR_OR_NULL(supply) ? 0 : mmc_regulator_set_ocr(mmc, supply, vdd_bit);
}
/**
 * sdhci_uhs2_reset - invoke SW reset
 * @host: SDHCI host
 * @mask: Control mask
 *
 * Invoke SW reset, depending on a bit in @mask and wait for completion.
 */
void sdhci_uhs2_reset(struct sdhci_host *host, u16 mask)
{
	u32 val;

	sdhci_writew(host, mask, SDHCI_UHS2_SW_RESET);

	if (mask & SDHCI_UHS2_SW_RESET_FULL)
		host->clock = 0;

	/* hw clears the bit when it's done */
	if (read_poll_timeout_atomic(sdhci_readw, val, !(val & mask), 10,
				     UHS2_RESET_TIMEOUT_100MS, true, host, SDHCI_UHS2_SW_RESET)) {
		pr_warn("%s: %s: Reset 0x%x never completed. %s: clean reset bit.\n", __func__,
			mmc_hostname(host->mmc), (int)mask, mmc_hostname(host->mmc));
		sdhci_writeb(host, 0, SDHCI_UHS2_SW_RESET);
		return;
	}
}
EXPORT_SYMBOL_GPL(sdhci_uhs2_reset);
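/*
 * Reset the legacy CMD/DATA circuits and, when host->mmc->uhs2_sd_tran is set
 * (UHS-II link in SD transport mode), also reset the UHS-II SD block and
 * restore the interrupt enables that the reset clears.
 */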
static void sdhci_uhs2_reset_cmd_data(struct sdhci_host *host)
{
	sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	if (host->mmc->uhs2_sd_tran) {
		sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_SD);

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_ALL_MASK, SDHCI_UHS2_INT_ERROR_MASK);
	}
}
void sdhci_uhs2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		pwr = sdhci_get_vdd_value(vdd);
		if (!pwr)
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
		pwr |= SDHCI_VDD2_POWER_180;
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		mmc_opt_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		mmc_regulator_set_vqmmc2(mmc, &mmc->ios);
	} else {
		mmc_opt_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/* support 1.8v only for now */
		mmc_regulator_set_vqmmc2(mmc, &mmc->ios);

		/* Clear the power reg before setting a new value */
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/* VDD1 first */
		pwr |= SDHCI_POWER_ON;
		sdhci_writeb(host, pwr & 0xf, SDHCI_POWER_CONTROL);

		/* then VDD2 */
		pwr |= SDHCI_VDD2_POWER_ON;
		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
	}
}
EXPORT_SYMBOL_GPL(sdhci_uhs2_set_power);
static u8 sdhci_calc_timeout_uhs2(struct sdhci_host *host, u8 *cmd_res, u8 *dead_lock)
{
	unsigned int dead_lock_timeout = 1 * 1000 * 1000;
	unsigned int cmd_res_timeout = 5 * 1000;
	unsigned int current_timeout;
	u8 count;

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < cmd_res_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("%s: Too large timeout 0x%x requested for CMD_RES!\n",
		    mmc_hostname(host->mmc), count);
		count = 0xE;
	}
	*cmd_res = count;

	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < dead_lock_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("%s: Too large timeout 0x%x requested for DEADLOCK!\n",
		    mmc_hostname(host->mmc), count);
		count = 0xE;
	}
	*dead_lock = count;

	return 0;
}
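/*
 * Worked example for the count calculation in sdhci_calc_timeout_uhs2() above
 * (illustrative numbers, assuming host->timeout_clk is in kHz as elsewhere in
 * sdhci): with timeout_clk = 1000 the base step is
 * (1 << 13) * 1000 / 1000 = 8192 us, so the 5000 us CMD_RES target is already
 * met at count = 0, while the 1 s deadlock target needs
 * 8192 us << 7 ~= 1.05 s, i.e. count = 7.
 */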
static void __sdhci_uhs2_set_timeout(struct sdhci_host *host)
{
	u8 cmd_res, dead_lock;

	sdhci_calc_timeout_uhs2(host, &cmd_res, &dead_lock);
	cmd_res |= FIELD_PREP(SDHCI_UHS2_TIMER_CTRL_DEADLOCK_MASK, dead_lock);
	sdhci_writeb(host, cmd_res, SDHCI_UHS2_TIMER_CTRL);
}
void sdhci_uhs2_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	__sdhci_set_timeout(host, cmd);

	if (mmc_card_uhs2(host->mmc))
		__sdhci_uhs2_set_timeout(host);
}
EXPORT_SYMBOL_GPL(sdhci_uhs2_set_timeout);
/**
 * sdhci_uhs2_clear_set_irqs - set Error Interrupt Status Enable register
 * @host: SDHCI host
 * @clear: bit-wise clear mask
 * @set: bit-wise set mask
 *
 * Set/unset bits in UHS-II Error Interrupt Status Enable register
 */
void sdhci_uhs2_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
{
	u32 ier;

	ier = sdhci_readl(host, SDHCI_UHS2_INT_STATUS_ENABLE);
	ier &= ~clear;
	ier |= set;
	sdhci_writel(host, ier, SDHCI_UHS2_INT_STATUS_ENABLE);
	sdhci_writel(host, ier, SDHCI_UHS2_INT_SIGNAL_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_uhs2_clear_set_irqs);
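/*
 * A recurring pattern in this file is
 *	sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_ALL_MASK, SDHCI_UHS2_INT_ERROR_MASK);
 * which masks everything and then re-enables only the UHS-II error interrupts.
 */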
static void __sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 cmd_res, dead_lock;
	u16 ctrl_2;

	/* UHS2 Timeout Control */
	sdhci_calc_timeout_uhs2(host, &cmd_res, &dead_lock);

	/* change to use calculated value */
	cmd_res |= FIELD_PREP(SDHCI_UHS2_TIMER_CTRL_DEADLOCK_MASK, dead_lock);

	sdhci_uhs2_clear_set_irqs(host,
				  SDHCI_UHS2_INT_CMD_TIMEOUT |
				  SDHCI_UHS2_INT_DEADLOCK_TIMEOUT,
				  0);
	sdhci_writeb(host, cmd_res, SDHCI_UHS2_TIMER_CTRL);
	sdhci_uhs2_clear_set_irqs(host, 0,
				  SDHCI_UHS2_INT_CMD_TIMEOUT |
				  SDHCI_UHS2_INT_DEADLOCK_TIMEOUT);

	/* UHS2 timing. Note, UHS2 timing is disabled when powering off */
	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if (ios->power_mode != MMC_POWER_OFF &&
	    (ios->timing == MMC_TIMING_UHS2_SPEED_A ||
	     ios->timing == MMC_TIMING_UHS2_SPEED_A_HD ||
	     ios->timing == MMC_TIMING_UHS2_SPEED_B ||
	     ios->timing == MMC_TIMING_UHS2_SPEED_B_HD))
		ctrl_2 |= SDHCI_CTRL_UHS2 | SDHCI_CTRL_UHS2_ENABLE;
	else
		ctrl_2 &= ~(SDHCI_CTRL_UHS2 | SDHCI_CTRL_UHS2_ENABLE);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
	host->timing = ios->timing;

	if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, true);

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_uhs2_set_power(host, ios->power_mode, ios->vdd);

	sdhci_set_clock(host, host->clock);
}
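/*
 * sdhci_uhs2_set_ios() below falls back to the legacy sdhci_set_ios() when the
 * card is not in UHS-II mode; otherwise it applies the common ios settings and
 * then the UHS-II specific ones from __sdhci_uhs2_set_ios() above.
 */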
static int sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);

	pr_debug("%s: clock %uHz powermode %u Vdd %u timing %u\n",
		 mmc_hostname(mmc), ios->clock, ios->power_mode, ios->vdd, ios->timing);

	if (!mmc_card_uhs2(mmc)) {
		sdhci_set_ios(mmc, ios);
		return 0;
	}

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return 0;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (ios->power_mode == MMC_POWER_OFF) {
			mmc_opt_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
			mmc_regulator_set_vqmmc2(mmc, ios);
		}
		return -1;
	}

	sdhci_set_ios_common(mmc, ios);

	__sdhci_uhs2_set_ios(mmc, ios);

	return 0;
}
static int sdhci_uhs2_interface_detect(struct sdhci_host *host)
{
	u32 val;

	if (read_poll_timeout(sdhci_readl, val, (val & SDHCI_UHS2_IF_DETECT),
			      100, UHS2_INTERFACE_DETECT_TIMEOUT_100MS, true,
			      host, SDHCI_PRESENT_STATE)) {
		pr_warn("%s: not detect UHS2 interface in 100ms.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);
		return -EIO;
	}

	/* Enable UHS2 error interrupts */
	sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_ALL_MASK, SDHCI_UHS2_INT_ERROR_MASK);

	if (read_poll_timeout(sdhci_readl, val, (val & SDHCI_UHS2_LANE_SYNC),
			      100, UHS2_LANE_SYNC_TIMEOUT_150MS, true, host, SDHCI_PRESENT_STATE)) {
		pr_warn("%s: UHS2 Lane sync fail in 150ms.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);
		return -EIO;
	}

	DBG("%s: UHS2 Lane synchronized in UHS2 mode, PHY is initialized.\n",
	    mmc_hostname(host->mmc));

	return 0;
}
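/*
 * sdhci_uhs2_interface_detect() above is a two-step handshake read back from
 * SDHCI_PRESENT_STATE: SDHCI_UHS2_IF_DETECT must assert within 100 ms, then
 * SDHCI_UHS2_LANE_SYNC within 150 ms once the UHS-II error interrupts are
 * enabled.
 */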
static int sdhci_uhs2_init(struct sdhci_host *host)
{
	u16 caps_ptr = 0;
	u32 caps_gen = 0;
	u32 caps_phy = 0;
	u32 caps_tran[2] = {0, 0};
	struct mmc_host *mmc = host->mmc;

	caps_ptr = sdhci_readw(host, SDHCI_UHS2_CAPS_PTR);
	if (caps_ptr < 0x100 || caps_ptr > 0x1FF) {
		pr_err("%s: SDHCI_UHS2_CAPS_PTR(%d) is wrong.\n",
		       mmc_hostname(mmc), caps_ptr);
		return -ENODEV;
	}
	caps_gen = sdhci_readl(host, caps_ptr + SDHCI_UHS2_CAPS_OFFSET);
	caps_phy = sdhci_readl(host, caps_ptr + SDHCI_UHS2_CAPS_PHY_OFFSET);
	caps_tran[0] = sdhci_readl(host, caps_ptr + SDHCI_UHS2_CAPS_TRAN_OFFSET);
	caps_tran[1] = sdhci_readl(host, caps_ptr + SDHCI_UHS2_CAPS_TRAN_1_OFFSET);

	/* General caps */
	mmc->uhs2_caps.dap = caps_gen & SDHCI_UHS2_CAPS_DAP_MASK;
	mmc->uhs2_caps.gap = FIELD_GET(SDHCI_UHS2_CAPS_GAP_MASK, caps_gen);
	mmc->uhs2_caps.n_lanes = FIELD_GET(SDHCI_UHS2_CAPS_LANE_MASK, caps_gen);
	mmc->uhs2_caps.addr64 = (caps_gen & SDHCI_UHS2_CAPS_ADDR_64) ? 1 : 0;
	mmc->uhs2_caps.card_type = FIELD_GET(SDHCI_UHS2_CAPS_DEV_TYPE_MASK, caps_gen);

	/* PHY caps */
	mmc->uhs2_caps.phy_rev = caps_phy & SDHCI_UHS2_CAPS_PHY_REV_MASK;
	mmc->uhs2_caps.speed_range = FIELD_GET(SDHCI_UHS2_CAPS_PHY_RANGE_MASK, caps_phy);
	mmc->uhs2_caps.n_lss_sync = FIELD_GET(SDHCI_UHS2_CAPS_PHY_N_LSS_SYN_MASK, caps_phy);
	mmc->uhs2_caps.n_lss_dir = FIELD_GET(SDHCI_UHS2_CAPS_PHY_N_LSS_DIR_MASK, caps_phy);
	if (mmc->uhs2_caps.n_lss_sync == 0)
		mmc->uhs2_caps.n_lss_sync = 16 << 2;
	else
		mmc->uhs2_caps.n_lss_sync <<= 2;
	if (mmc->uhs2_caps.n_lss_dir == 0)
		mmc->uhs2_caps.n_lss_dir = 16 << 3;
	else
		mmc->uhs2_caps.n_lss_dir <<= 3;

	/* LINK/TRAN caps */
	mmc->uhs2_caps.link_rev = caps_tran[0] & SDHCI_UHS2_CAPS_TRAN_LINK_REV_MASK;
	mmc->uhs2_caps.n_fcu = FIELD_GET(SDHCI_UHS2_CAPS_TRAN_N_FCU_MASK, caps_tran[0]);
	if (mmc->uhs2_caps.n_fcu == 0)
		mmc->uhs2_caps.n_fcu = 256;
	mmc->uhs2_caps.host_type = FIELD_GET(SDHCI_UHS2_CAPS_TRAN_HOST_TYPE_MASK, caps_tran[0]);
	mmc->uhs2_caps.maxblk_len = FIELD_GET(SDHCI_UHS2_CAPS_TRAN_BLK_LEN_MASK, caps_tran[0]);
	mmc->uhs2_caps.n_data_gap = caps_tran[1] & SDHCI_UHS2_CAPS_TRAN_1_N_DATA_GAP_MASK;

	return 0;
}
static int sdhci_uhs2_do_detect_init(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	DBG("Begin do uhs2 detect init.\n");

	if (host->ops->uhs2_pre_detect_init)
		host->ops->uhs2_pre_detect_init(host);

	if (sdhci_uhs2_interface_detect(host)) {
		pr_warn("%s: cannot detect UHS2 interface.\n", mmc_hostname(host->mmc));
		return -EIO;
	}

	if (sdhci_uhs2_init(host)) {
		pr_warn("%s: UHS2 init fail.\n", mmc_hostname(host->mmc));
		return -EIO;
	}

	/* Init complete, do soft reset and enable UHS2 error irqs. */
	sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_SD);
	sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_ALL_MASK, SDHCI_UHS2_INT_ERROR_MASK);
	/*
	 * N.B. SDHCI_INT_ENABLE and SDHCI_SIGNAL_ENABLE were cleared
	 * by SDHCI_UHS2_SW_RESET_SD
	 */
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	return 0;
}
static int sdhci_uhs2_disable_clk(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);

	clk &= ~SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	return 0;
}
static int sdhci_uhs2_enable_clk(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
	int timeout_us = 20000; /* 20ms */
	u32 val;

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	if (read_poll_timeout(sdhci_readw, val, (val & SDHCI_CLOCK_INT_STABLE),
			      10, timeout_us, true, host, SDHCI_CLOCK_CONTROL)) {
		pr_err("%s: Internal clock never stabilised.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);
		return -EIO;
	}

	return 0;
}
static void sdhci_uhs2_set_config(struct sdhci_host *host)
{
	u32 value;
	u16 sdhci_uhs2_set_ptr = sdhci_readw(host, SDHCI_UHS2_SETTINGS_PTR);
	u16 sdhci_uhs2_gen_set_reg = sdhci_uhs2_set_ptr;
	u16 sdhci_uhs2_phy_set_reg = sdhci_uhs2_set_ptr + 4;
	u16 sdhci_uhs2_tran_set_reg = sdhci_uhs2_set_ptr + 8;
	u16 sdhci_uhs2_tran_set_1_reg = sdhci_uhs2_set_ptr + 12;

	/* Set Gen Settings */
	value = FIELD_PREP(SDHCI_UHS2_GEN_SETTINGS_N_LANES_MASK, host->mmc->uhs2_caps.n_lanes_set);
	sdhci_writel(host, value, sdhci_uhs2_gen_set_reg);

	/* Set PHY Settings */
	value = FIELD_PREP(SDHCI_UHS2_PHY_N_LSS_DIR_MASK, host->mmc->uhs2_caps.n_lss_dir_set) |
		FIELD_PREP(SDHCI_UHS2_PHY_N_LSS_SYN_MASK, host->mmc->uhs2_caps.n_lss_sync_set);
	if (host->mmc->ios.timing == MMC_TIMING_UHS2_SPEED_B ||
	    host->mmc->ios.timing == MMC_TIMING_UHS2_SPEED_B_HD)
		value |= SDHCI_UHS2_PHY_SET_SPEED_B;
	sdhci_writel(host, value, sdhci_uhs2_phy_set_reg);

	/* Set LINK-TRAN Settings */
	value = FIELD_PREP(SDHCI_UHS2_TRAN_RETRY_CNT_MASK, host->mmc->uhs2_caps.max_retry_set) |
		FIELD_PREP(SDHCI_UHS2_TRAN_N_FCU_MASK, host->mmc->uhs2_caps.n_fcu_set);
	sdhci_writel(host, value, sdhci_uhs2_tran_set_reg);
	sdhci_writel(host, host->mmc->uhs2_caps.n_data_gap_set, sdhci_uhs2_tran_set_1_reg);
}
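/*
 * The settings registers written in sdhci_uhs2_set_config() above are located
 * via SDHCI_UHS2_SETTINGS_PTR: general settings at +0, PHY settings at +4, and
 * LINK-TRAN settings at +8 and +12.
 */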
static int sdhci_uhs2_check_dormant(struct sdhci_host *host)
{
	u32 val;

	if (read_poll_timeout(sdhci_readl, val, (val & SDHCI_UHS2_IN_DORMANT_STATE),
			      100, UHS2_CHECK_DORMANT_TIMEOUT_100MS, true, host,
			      SDHCI_PRESENT_STATE)) {
		pr_warn("%s: UHS2 IN_DORMANT fail in 100ms.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);
		return -EIO;
	}

	return 0;
}
static int sdhci_uhs2_control(struct mmc_host *mmc, enum sd_uhs2_operation op)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_ios *ios = &mmc->ios;
	int err = 0;

	DBG("Begin uhs2 control, act %d.\n", op);

	switch (op) {
	case UHS2_PHY_INIT:
		err = sdhci_uhs2_do_detect_init(mmc);
		break;
	case UHS2_SET_CONFIG:
		sdhci_uhs2_set_config(host);
		break;
	case UHS2_ENABLE_INT:
		sdhci_uhs2_clear_set_irqs(host, 0, SDHCI_INT_CARD_INT);
		break;
	case UHS2_DISABLE_INT:
		sdhci_uhs2_clear_set_irqs(host, SDHCI_INT_CARD_INT, 0);
		break;
	case UHS2_CHECK_DORMANT:
		err = sdhci_uhs2_check_dormant(host);
		break;
	case UHS2_DISABLE_CLK:
		err = sdhci_uhs2_disable_clk(mmc);
		break;
	case UHS2_ENABLE_CLK:
		err = sdhci_uhs2_enable_clk(mmc);
		break;
	case UHS2_SET_IOS:
		err = sdhci_uhs2_set_ios(mmc, ios);
		break;
	default:
		pr_err("%s: input sd uhs2 operation %d is wrong!\n",
		       mmc_hostname(host->mmc), op);
		err = -EIO;
		break;
	}

	return err;
}
/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/
static void sdhci_uhs2_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	sdhci_initialize_data(host, data);

	sdhci_prepare_dma(host, data);

	sdhci_writew(host, data->blksz, SDHCI_UHS2_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_UHS2_BLOCK_COUNT);
}
static void sdhci_uhs2_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data = host->data;

	__sdhci_finish_data_common(host, true);

	__sdhci_finish_mrq(host, data->mrq);
}
static void sdhci_uhs2_set_transfer_mode(struct sdhci_host *host, struct mmc_command *cmd)
{
	u16 mode;
	struct mmc_data *data = cmd->data;

	if (!data) {
		/* clear Auto CMD settings for no data CMDs */
		if (uhs2_dev_cmd(cmd) == UHS2_DEV_CMD_TRANS_ABORT) {
			mode = 0;
		} else {
			mode = sdhci_readw(host, SDHCI_UHS2_TRANS_MODE);
			if (cmd->opcode == MMC_STOP_TRANSMISSION || cmd->opcode == MMC_ERASE)
				mode |= SDHCI_UHS2_TRNS_WAIT_EBSY;
			else
				/* send status mode */
				if (cmd->opcode == MMC_SEND_STATUS)
					mode = 0;
		}

		DBG("UHS2 no data trans mode is 0x%x.\n", mode);

		sdhci_writew(host, mode, SDHCI_UHS2_TRANS_MODE);
		return;
	}

	WARN_ON(!host->data);

	mode = SDHCI_UHS2_TRNS_BLK_CNT_EN | SDHCI_UHS2_TRNS_WAIT_EBSY;
	if (data->flags & MMC_DATA_WRITE)
		mode |= SDHCI_UHS2_TRNS_DATA_TRNS_WRT;

	if (data->blocks == 1 &&
	    data->blksz != 512 &&
	    cmd->opcode != MMC_READ_SINGLE_BLOCK &&
	    cmd->opcode != MMC_WRITE_BLOCK) {
		mode &= ~SDHCI_UHS2_TRNS_BLK_CNT_EN;
		mode |= SDHCI_UHS2_TRNS_BLK_BYTE_MODE;
	}

	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_UHS2_TRNS_DMA;

	if (cmd->uhs2_cmd->tmode_half_duplex)
		mode |= SDHCI_UHS2_TRNS_2L_HD;

	sdhci_writew(host, mode, SDHCI_UHS2_TRANS_MODE);

	DBG("UHS2 trans mode is 0x%x.\n", mode);
}
static void __sdhci_uhs2_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, j;
	u16 cmd_reg;

	i = 0;
	sdhci_writel(host,
		     ((u32)cmd->uhs2_cmd->arg << 16) |
				(u32)cmd->uhs2_cmd->header,
		     SDHCI_UHS2_CMD_PACKET + i);
	i += 4;

	/*
	 * Per spec, payload (config) should be MSB before sending out.
	 * No conversion is needed here because the payload was already
	 * stored MSB-first when the config read/write commands were prepared.
	 */
	for (j = 0; j < cmd->uhs2_cmd->payload_len / sizeof(u32); j++) {
		sdhci_writel(host, *(__force u32 *)(cmd->uhs2_cmd->payload + j),
			     SDHCI_UHS2_CMD_PACKET + i);
		i += 4;
	}

	for ( ; i < SDHCI_UHS2_CMD_PACK_MAX_LEN; i += 4)
		sdhci_writel(host, 0, SDHCI_UHS2_CMD_PACKET + i);

	DBG("UHS2 CMD packet_len = %d.\n", cmd->uhs2_cmd->packet_len);
	for (i = 0; i < cmd->uhs2_cmd->packet_len; i++)
		DBG("UHS2 CMD_PACKET[%d] = 0x%x.\n", i,
		    sdhci_readb(host, SDHCI_UHS2_CMD_PACKET + i));

	cmd_reg = FIELD_PREP(SDHCI_UHS2_CMD_PACK_LEN_MASK, cmd->uhs2_cmd->packet_len);
	if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC)
		cmd_reg |= SDHCI_UHS2_CMD_DATA;
	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd_reg |= SDHCI_UHS2_CMD_CMD12;

	/* UHS2 Native ABORT */
	if ((cmd->uhs2_cmd->header & UHS2_NATIVE_PACKET) &&
	    (uhs2_dev_cmd(cmd) == UHS2_DEV_CMD_TRANS_ABORT))
		cmd_reg |= SDHCI_UHS2_CMD_TRNS_ABORT;

	/* UHS2 Native DORMANT */
	if ((cmd->uhs2_cmd->header & UHS2_NATIVE_PACKET) &&
	    (uhs2_dev_cmd(cmd) == UHS2_DEV_CMD_GO_DORMANT_STATE))
		cmd_reg |= SDHCI_UHS2_CMD_DORMANT;

	DBG("0x%x is set to UHS2 CMD register.\n", cmd_reg);

	sdhci_writew(host, cmd_reg, SDHCI_UHS2_CMD);
}
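/*
 * Command packet layout as written by __sdhci_uhs2_send_command() above: the
 * first DWORD carries the UHS-II header in its low 16 bits and the argument in
 * its high 16 bits, followed by the payload DWORDs, with the rest of the
 * packet area zeroed up to SDHCI_UHS2_CMD_PACK_MAX_LEN.
 */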
static bool sdhci_uhs2_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	mask = SDHCI_CMD_INHIBIT;

	if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
		return false;

	host->cmd = cmd;
	host->data_timeout = 0;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
		__sdhci_uhs2_set_timeout(host);
	}

	if (cmd->data)
		sdhci_uhs2_prepare_data(host, cmd);

	sdhci_uhs2_set_transfer_mode(host, cmd);

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	__sdhci_uhs2_send_command(host, cmd);

	return true;
}
static bool sdhci_uhs2_send_command_retry(struct sdhci_host *host,
					  struct mmc_command *cmd,
					  unsigned long flags)
	__releases(host->lock)
	__acquires(host->lock)
{
	struct mmc_command *deferred_cmd = host->deferred_cmd;
	int timeout = 10; /* Approx. 10 ms */
	bool present;

	while (!sdhci_uhs2_send_command(host, cmd)) {
		if (!timeout--) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			return false;
		}

		spin_unlock_irqrestore(&host->lock, flags);

		usleep_range(1000, 1250);

		present = host->mmc->ops->get_cd(host->mmc);

		spin_lock_irqsave(&host->lock, flags);

		/* A deferred command might disappear, handle that */
		if (cmd == deferred_cmd && cmd != host->deferred_cmd)
			return true;

		if (sdhci_present_error(host, cmd, present))
			return false;
	}

	if (cmd == host->deferred_cmd)
		host->deferred_cmd = NULL;

	return true;
}
static void __sdhci_uhs2_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	u8 resp;
	u8 error_code;
	bool breada0 = 0;
	int i;

	if (host->mmc->uhs2_sd_tran) {
		resp = sdhci_readb(host, SDHCI_UHS2_RESPONSE + 2);
		if (resp & UHS2_RES_NACK_MASK) {
			error_code = (resp >> UHS2_RES_ECODE_POS) & UHS2_RES_ECODE_MASK;
			pr_err("%s: NACK response, ECODE=0x%x.\n",
			       mmc_hostname(host->mmc), error_code);
		}
		breada0 = 1;
	}

	if (cmd->uhs2_cmd->uhs2_resp_len) {
		int len = min_t(int, cmd->uhs2_cmd->uhs2_resp_len, UHS2_MAX_RESP_LEN);

		/* Get whole response of some native CCMD, like
		 * DEVICE_INIT, ENUMERATE.
		 */
		for (i = 0; i < len; i++)
			cmd->uhs2_cmd->uhs2_resp[i] = sdhci_readb(host, SDHCI_UHS2_RESPONSE + i);
	} else {
		/* Get SD CMD response and Payload for some read
		 * CCMD, like INQUIRY_CFG.
		 */
		/* Per spec (p136), payload field is divided into
		 * a unit of DWORD and transmission order within
		 * a DWORD is big endian.
		 */
		if (!breada0)
			sdhci_readl(host, SDHCI_UHS2_RESPONSE);
		for (i = 4; i < 20; i += 4) {
			cmd->resp[i / 4 - 1] =
				(sdhci_readb(host,
					     SDHCI_UHS2_RESPONSE + i) << 24) |
				(sdhci_readb(host,
					     SDHCI_UHS2_RESPONSE + i + 1)
					<< 16) |
				(sdhci_readb(host,
					     SDHCI_UHS2_RESPONSE + i + 2)
					<< 8) |
				sdhci_readb(host, SDHCI_UHS2_RESPONSE + i + 3);
		}
	}
}
static void sdhci_uhs2_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	__sdhci_uhs2_finish_command(host);

	host->cmd = NULL;

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Processed actual command. */
	if (host->data && host->data_early)
		sdhci_uhs2_finish_data(host);

	if (!cmd->data)
		__sdhci_finish_mrq(host, cmd->mrq);
}
static void sdhci_uhs2_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_command *cmd;
	unsigned long flags;
	bool present;

	if (!(mmc_card_uhs2(mmc))) {
		sdhci_request(mmc, mrq);
		return;
	}

	if (mrq->data)
		mrq->data->stop = NULL;

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	if (sdhci_present_error(host, mrq->cmd, present))
		goto out_finish;

	cmd = mrq->cmd;

	if (!sdhci_uhs2_send_command_retry(host, cmd, flags))
		goto out_finish;

	spin_unlock_irqrestore(&host->lock, flags);

	return;

out_finish:
	sdhci_finish_mrq(host, mrq);
	spin_unlock_irqrestore(&host->lock, flags);
}
/*****************************************************************************\
 *                                                                           *
 * Request done                                                              *
 *                                                                           *
\*****************************************************************************/
static bool sdhci_uhs2_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return sdhci_needs_reset(host, mrq) ||
	       (!(host->flags & SDHCI_DEVICE_DEAD) && mrq->data && mrq->data->error);
}
static bool sdhci_uhs2_request_done(struct sdhci_host *host)
{
	unsigned long flags;
	struct mmc_request *mrq;
	int i;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		mrq = host->mrqs_done[i];
		if (mrq)
			break;
	}

	if (!mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return true;
	}

	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA)
		sdhci_request_done_dma(host, mrq);

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (sdhci_uhs2_needs_reset(host, mrq)) {
		/*
		 * Do not finish until command and data lines are available for
		 * reset. Note there can only be one other mrq, so it cannot
		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
		 * would both be null.
		 */
		if (host->cmd || host->data_cmd) {
			spin_unlock_irqrestore(&host->lock, flags);
			return true;
		}

		if (mrq->cmd->error || mrq->data->error)
			sdhci_uhs2_reset_cmd_data(host);
		else
			sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_SD);
		host->pending_reset = false;
	}

	host->mrqs_done[i] = NULL;

	spin_unlock_irqrestore(&host->lock, flags);

	if (host->ops->request_done)
		host->ops->request_done(host, mrq);
	else
		mmc_request_done(host->mmc, mrq);

	return false;
}
static void sdhci_uhs2_complete_work(struct work_struct *work)
{
	struct sdhci_host *host = container_of(work, struct sdhci_host,
					       complete_work);

	if (!mmc_card_uhs2(host->mmc)) {
		sdhci_complete_work(work);
		return;
	}

	while (!sdhci_uhs2_request_done(host))
		;
}
/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/
static void __sdhci_uhs2_irq(struct sdhci_host *host, u32 uhs2mask)
{
	struct mmc_command *cmd = host->cmd;

	DBG("*** %s got UHS2 error interrupt: 0x%08x\n",
	    mmc_hostname(host->mmc), uhs2mask);

	if (uhs2mask & SDHCI_UHS2_INT_CMD_ERR_MASK) {
		if (!host->cmd) {
			pr_err("%s: Got cmd interrupt 0x%08x but no cmd.\n",
			       mmc_hostname(host->mmc),
			       (unsigned int)uhs2mask);
			sdhci_dumpregs(host);
			return;
		}
		host->cmd->error = -EILSEQ;
		if (uhs2mask & SDHCI_UHS2_INT_CMD_TIMEOUT)
			host->cmd->error = -ETIMEDOUT;
	}

	if (uhs2mask & SDHCI_UHS2_INT_DATA_ERR_MASK) {
		if (!host->data) {
			pr_err("%s: Got data interrupt 0x%08x but no data.\n",
			       mmc_hostname(host->mmc),
			       (unsigned int)uhs2mask);
			sdhci_dumpregs(host);
			return;
		}

		if (uhs2mask & SDHCI_UHS2_INT_DEADLOCK_TIMEOUT) {
			pr_err("%s: Got deadlock timeout interrupt 0x%08x\n",
			       mmc_hostname(host->mmc),
			       (unsigned int)uhs2mask);
			host->data->error = -ETIMEDOUT;
		} else if (uhs2mask & SDHCI_UHS2_INT_ADMA_ERROR) {
			pr_err("%s: ADMA error = 0x%x\n",
			       mmc_hostname(host->mmc),
			       sdhci_readb(host, SDHCI_ADMA_ERROR));
			host->data->error = -EIO;
		} else {
			host->data->error = -EILSEQ;
		}
	}

	if (host->data && host->data->error)
		sdhci_uhs2_finish_data(host);
	else
		sdhci_finish_mrq(host, cmd->mrq);
}
u32 sdhci_uhs2_irq(struct sdhci_host *host, u32 intmask)
{
	u32 mask = intmask, uhs2mask;

	if (!mmc_card_uhs2(host->mmc))
		goto out;

	if (intmask & SDHCI_INT_ERROR) {
		uhs2mask = sdhci_readl(host, SDHCI_UHS2_INT_STATUS);
		if (!(uhs2mask & SDHCI_UHS2_INT_ERROR_MASK))
			goto cmd_irq;

		/* Clear error interrupts */
		sdhci_writel(host, uhs2mask & SDHCI_UHS2_INT_ERROR_MASK,
			     SDHCI_UHS2_INT_STATUS);

		/* Handle error interrupts */
		__sdhci_uhs2_irq(host, uhs2mask);

		/* Caller, sdhci_irq(), doesn't have to care about UHS-2 errors */
		intmask &= ~SDHCI_INT_ERROR;
		mask &= SDHCI_INT_ERROR;
	}

cmd_irq:
	if (intmask & SDHCI_INT_CMD_MASK) {
		/* Clear command interrupt */
		sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, SDHCI_INT_STATUS);

		/* Handle command interrupt */
		if (intmask & SDHCI_INT_RESPONSE)
			sdhci_uhs2_finish_command(host);

		/* Caller, sdhci_irq(), doesn't have to care about UHS-2 commands */
		intmask &= ~SDHCI_INT_CMD_MASK;
		mask &= SDHCI_INT_CMD_MASK;
	}

	/* Clear already-handled interrupts. */
	sdhci_writel(host, mask, SDHCI_INT_STATUS);

out:
	return intmask;
}
EXPORT_SYMBOL_GPL(sdhci_uhs2_irq);
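/*
 * sdhci_uhs2_irq() above consumes UHS-II error and command interrupts and
 * strips the corresponding bits from intmask, so the caller, sdhci_irq(),
 * only sees what is left for legacy handling.
 */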
static irqreturn_t sdhci_uhs2_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	struct mmc_command *cmd;
	unsigned long flags;
	u32 isr;

	if (!mmc_card_uhs2(host->mmc))
		return sdhci_thread_irq(irq, dev_id);

	while (!sdhci_uhs2_request_done(host))
		;

	spin_lock_irqsave(&host->lock, flags);

	isr = host->thread_isr;
	host->thread_isr = 0;

	cmd = host->deferred_cmd;
	if (cmd && !sdhci_uhs2_send_command_retry(host, cmd, flags))
		sdhci_finish_mrq(host, cmd->mrq);

	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		struct mmc_host *mmc = host->mmc;

		mmc->ops->card_event(mmc);
		mmc_detect_change(mmc, msecs_to_jiffies(200));
	}

	return IRQ_HANDLED;
}
/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/
static int sdhci_uhs2_host_ops_init(struct sdhci_host *host)
{
	host->mmc_host_ops.uhs2_control = sdhci_uhs2_control;
	host->mmc_host_ops.request = sdhci_uhs2_request;

	return 0;
}

static int __init sdhci_uhs2_mod_init(void)
{
	return 0;
}
module_init(sdhci_uhs2_mod_init);

static void __exit sdhci_uhs2_mod_exit(void)
{
}
module_exit(sdhci_uhs2_mod_exit);
/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/
static void __sdhci_uhs2_add_host_v4(struct sdhci_host *host, u32 caps1)
{
	struct mmc_host *mmc;
	u32 max_current_caps2;

	mmc = host->mmc;

	if (caps1 & SDHCI_SUPPORT_UHS2)
		mmc->caps2 |= MMC_CAP2_SD_UHS2;

	max_current_caps2 = sdhci_readl(host, SDHCI_MAX_CURRENT_1);

	if ((caps1 & SDHCI_CAN_VDD2_180) &&
	    !max_current_caps2 &&
	    !IS_ERR(mmc->supply.vqmmc2)) {
		int curr = regulator_get_current_limit(mmc->supply.vqmmc2);

		if (curr > 0) {
			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000;  /* convert to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;
			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps2 = curr;
		}
	}

	if (!(caps1 & SDHCI_CAN_VDD2_180))
		mmc->caps2 &= ~MMC_CAP2_SD_UHS2;
}
static void __sdhci_uhs2_remove_host(struct sdhci_host *host, int dead)
{
	if (!mmc_card_uhs2(host->mmc))
		return;

	if (!dead)
		sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_FULL);
}
int sdhci_uhs2_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	if (host->version >= SDHCI_SPEC_400)
		__sdhci_uhs2_add_host_v4(host, host->caps1);

	if ((mmc->caps2 & MMC_CAP2_SD_UHS2) && !host->v4_mode)
		/* host doesn't want to enable UHS2 support */
		mmc->caps2 &= ~MMC_CAP2_SD_UHS2;

	if (mmc->caps2 & MMC_CAP2_SD_UHS2)
		sdhci_uhs2_host_ops_init(host);

	host->complete_work_fn = sdhci_uhs2_complete_work;
	host->thread_irq_fn = sdhci_uhs2_thread_irq;

	/* LED support not implemented for UHS2 */
	host->quirks |= SDHCI_QUIRK_NO_LED;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	if (host->version >= SDHCI_SPEC_400)
		__sdhci_uhs2_remove_host(host, 0);

	sdhci_cleanup_host(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_uhs2_add_host);
void sdhci_uhs2_remove_host(struct sdhci_host *host, int dead)
{
	__sdhci_uhs2_remove_host(host, dead);

	sdhci_remove_host(host, dead);
}
EXPORT_SYMBOL_GPL(sdhci_uhs2_remove_host);

MODULE_AUTHOR("Intel, Genesys Logic, Linaro");
MODULE_DESCRIPTION("MMC UHS-II Support");
MODULE_LICENSE("GPL");