// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/mmc/core/mmc.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 */

#include <linux/err.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "quirks.h"
#include "sd_ops.h"
#include "pwrseq.h"

#define DEFAULT_CMD6_TIMEOUT_MS	500
#define MIN_CACHE_EN_TIMEOUT_MS 1600
static const unsigned int tran_exp[] = {
	10000,		100000,		1000000,	10000000,
	0,		0,		0,		0
};

static const unsigned char tran_mant[] = {
	0,	10,	12,	13,	15,	20,	25,	30,
	35,	40,	45,	50,	55,	60,	70,	80,
};

static const unsigned int taac_exp[] = {
	1,	10,	100,	1000,	10000,	100000,	1000000, 10000000,
};

static const unsigned int taac_mant[] = {
	0,	10,	12,	13,	15,	20,	25,	30,
	35,	40,	45,	50,	55,	60,	70,	80,
};
#define UNSTUFF_BITS(resp,start,size)					\
	({								\
		const int __size = size;				\
		const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1;	\
		const int __off = 3 - ((start) / 32);			\
		const int __shft = (start) & 31;			\
		u32 __res;						\
									\
		__res = resp[__off] >> __shft;				\
		if (__size + __shft > 32)				\
			__res |= resp[__off-1] << ((32 - __shft) % 32);	\
		__res & __mask;						\
	})
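/*
 * Worked example of UNSTUFF_BITS (illustrative comment only, not kernel
 * text): the 128-bit response is held as four big-endian u32 words, with
 * resp[0] carrying bits 127:96. For UNSTUFF_BITS(resp, 104, 24) -- the
 * CID manufacturer ID decoded below -- __off = 3 - 104/32 = 0 and
 * __shft = 104 & 31 = 8, so the result is (resp[0] >> 8) & 0xFFFFFF.
 * Because __size + __shft is exactly 32 here, no bits spill over from
 * the neighbouring word.
 */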
/*
 * Given the decoded CSD structure, decode the raw CID to our CID structure.
 */
static int mmc_decode_cid(struct mmc_card *card)
{
	u32 *resp = card->raw_cid;

	/*
	 * The selection of the format here is based upon published
	 * specs from sandisk and from what people have reported.
	 */
	switch (card->csd.mmca_vsn) {
	case 0: /* MMC v1.0 - v1.2 */
	case 1: /* MMC v1.4 */
		card->cid.manfid	= UNSTUFF_BITS(resp, 104, 24);
		card->cid.prod_name[0]	= UNSTUFF_BITS(resp, 96, 8);
		card->cid.prod_name[1]	= UNSTUFF_BITS(resp, 88, 8);
		card->cid.prod_name[2]	= UNSTUFF_BITS(resp, 80, 8);
		card->cid.prod_name[3]	= UNSTUFF_BITS(resp, 72, 8);
		card->cid.prod_name[4]	= UNSTUFF_BITS(resp, 64, 8);
		card->cid.prod_name[5]	= UNSTUFF_BITS(resp, 56, 8);
		card->cid.prod_name[6]	= UNSTUFF_BITS(resp, 48, 8);
		card->cid.hwrev		= UNSTUFF_BITS(resp, 44, 4);
		card->cid.fwrev		= UNSTUFF_BITS(resp, 40, 4);
		card->cid.serial	= UNSTUFF_BITS(resp, 16, 24);
		card->cid.month		= UNSTUFF_BITS(resp, 12, 4);
		card->cid.year		= UNSTUFF_BITS(resp, 8, 4) + 1997;
		break;

	case 2: /* MMC v2.0 - v2.2 */
	case 3: /* MMC v3.1 - v3.3 */
	case 4: /* MMC v4 */
		card->cid.manfid	= UNSTUFF_BITS(resp, 120, 8);
		card->cid.oemid		= UNSTUFF_BITS(resp, 104, 16);
		card->cid.prod_name[0]	= UNSTUFF_BITS(resp, 96, 8);
		card->cid.prod_name[1]	= UNSTUFF_BITS(resp, 88, 8);
		card->cid.prod_name[2]	= UNSTUFF_BITS(resp, 80, 8);
		card->cid.prod_name[3]	= UNSTUFF_BITS(resp, 72, 8);
		card->cid.prod_name[4]	= UNSTUFF_BITS(resp, 64, 8);
		card->cid.prod_name[5]	= UNSTUFF_BITS(resp, 56, 8);
		card->cid.prv		= UNSTUFF_BITS(resp, 48, 8);
		card->cid.serial	= UNSTUFF_BITS(resp, 16, 32);
		card->cid.month		= UNSTUFF_BITS(resp, 12, 4);
		card->cid.year		= UNSTUFF_BITS(resp, 8, 4) + 1997;
		break;

	default:
		pr_err("%s: card has unknown MMCA version %d\n",
			mmc_hostname(card->host), card->csd.mmca_vsn);
		return -EINVAL;
	}

	return 0;
}
static void mmc_set_erase_size(struct mmc_card *card)
{
	if (card->ext_csd.erase_group_def & 1)
		card->erase_size = card->ext_csd.hc_erase_size;
	else
		card->erase_size = card->csd.erase_size;

	mmc_init_erase(card);
}
/*
 * Given a 128-bit response, decode to our card CSD structure.
 */
static int mmc_decode_csd(struct mmc_card *card)
{
	struct mmc_csd *csd = &card->csd;
	unsigned int e, m, a, b;
	u32 *resp = card->raw_csd;

	/*
	 * We only understand CSD structure v1.1 and v1.2.
	 * v1.2 has extra information in bits 15, 11 and 10.
	 * We also support eMMC v4.4 & v4.41.
	 */
	csd->structure = UNSTUFF_BITS(resp, 126, 2);
	if (csd->structure == 0) {
		pr_err("%s: unrecognised CSD structure version %d\n",
			mmc_hostname(card->host), csd->structure);
		return -EINVAL;
	}

	csd->mmca_vsn	 = UNSTUFF_BITS(resp, 122, 4);
	m = UNSTUFF_BITS(resp, 115, 4);
	e = UNSTUFF_BITS(resp, 112, 3);
	csd->taac_ns	 = (taac_exp[e] * taac_mant[m] + 9) / 10;
	csd->taac_clks	 = UNSTUFF_BITS(resp, 104, 8) * 100;

	m = UNSTUFF_BITS(resp, 99, 4);
	e = UNSTUFF_BITS(resp, 96, 3);
	csd->max_dtr	  = tran_exp[e] * tran_mant[m];
	csd->cmdclass	  = UNSTUFF_BITS(resp, 84, 12);

	e = UNSTUFF_BITS(resp, 47, 3);
	m = UNSTUFF_BITS(resp, 62, 12);
	csd->capacity	  = (1 + m) << (e + 2);

	csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
	csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
	csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
	csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
	csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
	csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
	csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
	csd->write_partial = UNSTUFF_BITS(resp, 21, 1);

	if (csd->write_blkbits >= 9) {
		a = UNSTUFF_BITS(resp, 42, 5);
		b = UNSTUFF_BITS(resp, 37, 5);
		csd->erase_size = (a + 1) * (b + 1);
		csd->erase_size <<= csd->write_blkbits - 9;
	}

	return 0;
}
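/*
 * Illustrative note (comment only, not kernel text): with the CSD fields
 * above, a card reporting C_SIZE m = 4095 and C_SIZE_MULT e = 7 decodes
 * to csd->capacity = (1 + 4095) << (7 + 2) = 4096 * 512 blocks. That is
 * exactly the "magic" capacity that mmc_read_ext_csd() below treats as a
 * possible high-capacity (sector-addressed) card when EXT_CSD cannot be
 * read.
 */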
static void mmc_select_card_type(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u8 card_type = card->ext_csd.raw_card_type;
	u32 caps = host->caps, caps2 = host->caps2;
	unsigned int hs_max_dtr = 0, hs200_max_dtr = 0;
	unsigned int avail_type = 0;

	if (caps & MMC_CAP_MMC_HIGHSPEED &&
	    card_type & EXT_CSD_CARD_TYPE_HS_26) {
		hs_max_dtr = MMC_HIGH_26_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS_26;
	}

	if (caps & MMC_CAP_MMC_HIGHSPEED &&
	    card_type & EXT_CSD_CARD_TYPE_HS_52) {
		hs_max_dtr = MMC_HIGH_52_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS_52;
	}

	if (caps & (MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR) &&
	    card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
		hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
	}

	if (caps & MMC_CAP_1_2V_DDR &&
	    card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
		hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V;
	}

	if (caps2 & MMC_CAP2_HS200_1_8V_SDR &&
	    card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
	}

	if (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
	    card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V;
	}

	if (caps2 & MMC_CAP2_HS400_1_8V &&
	    card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V;
	}

	if (caps2 & MMC_CAP2_HS400_1_2V &&
	    card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
	}

	if ((caps2 & MMC_CAP2_HS400_ES) &&
	    card->ext_csd.strobe_support &&
	    (avail_type & EXT_CSD_CARD_TYPE_HS400))
		avail_type |= EXT_CSD_CARD_TYPE_HS400ES;

	card->ext_csd.hs_max_dtr = hs_max_dtr;
	card->ext_csd.hs200_max_dtr = hs200_max_dtr;
	card->mmc_avail_type = avail_type;
}
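/*
 * Illustrative note (comment only, not kernel text): avail_type ends up
 * as the intersection of what the card advertises in EXT_CSD[CARD_TYPE]
 * and what the host caps allow. E.g. a card advertising
 * HS_26 | HS_52 | HS200_1_8V on a host with only MMC_CAP_MMC_HIGHSPEED
 * yields avail_type = HS_26 | HS_52 and hs_max_dtr = MMC_HIGH_52_MAX_DTR.
 */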
static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
{
	u8 hc_erase_grp_sz, hc_wp_grp_sz;

	/*
	 * Disable these attributes by default
	 */
	card->ext_csd.enhanced_area_offset = -EINVAL;
	card->ext_csd.enhanced_area_size = -EINVAL;

	/*
	 * Enhanced area feature support -- check whether the eMMC
	 * card has the Enhanced area enabled. If so, export enhanced
	 * area offset and size to user by adding sysfs interface.
	 */
	if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
	    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
		if (card->ext_csd.partition_setting_completed) {
			hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			/*
			 * calculate the enhanced data area offset, in bytes
			 */
			card->ext_csd.enhanced_area_offset =
				(((unsigned long long)ext_csd[139]) << 24) +
				(((unsigned long long)ext_csd[138]) << 16) +
				(((unsigned long long)ext_csd[137]) << 8) +
				(((unsigned long long)ext_csd[136]));
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			/*
			 * calculate the enhanced data area size, in kilobytes
			 */
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
		} else {
			pr_warn("%s: defines enhanced area without partition setting complete\n",
				mmc_hostname(card->host));
		}
	}
}
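/*
 * Worked example (illustrative comment only, not kernel text): with
 * MAX_ENH_SIZE_MULT (ext_csd[140..142]) = 2, hc_erase_grp_sz = 1 and
 * hc_wp_grp_sz = 16, enhanced_area_size = 2 * (1 * 16) = 32, and the
 * final <<= 9 converts 512 KiB units into KiB: 32 * 512 KiB = 16 MiB.
 */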
static void mmc_part_add(struct mmc_card *card, u64 size,
			 unsigned int part_cfg, char *name, int idx, bool ro,
			 int area_type)
{
	card->part[card->nr_parts].size = size;
	card->part[card->nr_parts].part_cfg = part_cfg;
	sprintf(card->part[card->nr_parts].name, name, idx);
	card->part[card->nr_parts].force_ro = ro;
	card->part[card->nr_parts].area_type = area_type;
	card->nr_parts++;
}
static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
{
	int idx;
	u8 hc_erase_grp_sz, hc_wp_grp_sz;
	unsigned int part_size;

	/*
	 * General purpose partition feature support --
	 * If ext_csd has the size of general purpose partitions,
	 * set size, part_cfg, partition name in mmc_part.
	 */
	if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
	    EXT_CSD_PART_SUPPORT_PART_EN) {
		hc_erase_grp_sz =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		hc_wp_grp_sz =
			ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

		for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
			if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
			    !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
			    !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
				continue;
			if (card->ext_csd.partition_setting_completed == 0) {
				pr_warn("%s: has partition size defined without partition complete\n",
					mmc_hostname(card->host));
				break;
			}
			part_size =
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
				<< 16) +
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
				<< 8) +
				ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
			part_size *= (hc_erase_grp_sz * hc_wp_grp_sz);
			mmc_part_add(card, part_size << 19,
				EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
				"gp%d", idx, false,
				MMC_BLK_DATA_AREA_GP);
		}
	}
}
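/*
 * Worked example (illustrative comment only, not kernel text): a GP
 * partition with GP_SIZE_MULT bytes {0x02, 0x00, 0x00} on a card where
 * hc_erase_grp_sz = 1 and hc_wp_grp_sz = 16 gives
 * part_size = 2 * (1 * 16) = 32 write-protect-group units; the << 19 in
 * the mmc_part_add() call converts 512 KiB units to bytes, so the
 * resulting partition is 32 * 512 KiB = 16 MiB.
 */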
/* Minimum partition switch timeout in milliseconds */
#define MMC_MIN_PART_SWITCH_TIME	300

/*
 * Decode extended CSD.
 */
static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	int err = 0, idx;
	unsigned int part_size;
	struct device_node *np;
	bool broken_hpi = false;

	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
	card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
	if (card->csd.structure == 3) {
		if (card->ext_csd.raw_ext_csd_structure > 2) {
			pr_err("%s: unrecognised EXT_CSD structure version %d\n",
				mmc_hostname(card->host),
				card->ext_csd.raw_ext_csd_structure);
			err = -EINVAL;
			goto out;
		}
	}

	np = mmc_of_find_child_device(card->host, 0);
	if (np && of_device_is_compatible(np, "mmc-card"))
		broken_hpi = of_property_read_bool(np, "broken-hpi");
	of_node_put(np);

	/*
	 * The EXT_CSD format is meant to be forward compatible. As long
	 * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV
	 * are authorized, see JEDEC JESD84-B50 section B.8.
	 */
	card->ext_csd.rev = ext_csd[EXT_CSD_REV];

	/* fixup device after ext_csd revision field is updated */
	mmc_fixup_device(card, mmc_ext_csd_fixups);

	card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
	card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
	card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
	card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

		/* Cards with density > 2GiB are sector addressed */
		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
			mmc_card_set_blockaddr(card);
	}

	card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT];
	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
	mmc_select_card_type(card);

	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
	card->ext_csd.raw_erase_timeout_mult =
		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
	card->ext_csd.raw_hc_erase_grp_size =
		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];

		/* EXT_CSD value is in units of 10ms, but we store in ms */
		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
		/* Some eMMC set the value too low so set a minimum */
		if (card->ext_csd.part_time &&
		    card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
			card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
				1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

		/*
		 * There are two boot regions of equal size, defined in
		 * multiples of 128K.
		 */
		if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
			for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
				part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
				mmc_part_add(card, part_size,
					EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
					"boot%d", idx, true,
					MMC_BLK_DATA_AREA_BOOT);
			}
		}
	}

	card->ext_csd.raw_hc_erase_gap_size =
		ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
	card->ext_csd.raw_sec_trim_mult =
		ext_csd[EXT_CSD_SEC_TRIM_MULT];
	card->ext_csd.raw_sec_erase_mult =
		ext_csd[EXT_CSD_SEC_ERASE_MULT];
	card->ext_csd.raw_sec_feature_support =
		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
	card->ext_csd.raw_trim_mult =
		ext_csd[EXT_CSD_TRIM_MULT];
	card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
	card->ext_csd.raw_driver_strength = ext_csd[EXT_CSD_DRIVER_STRENGTH];
	if (card->ext_csd.rev >= 4) {
		if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
		    EXT_CSD_PART_SETTING_COMPLETED)
			card->ext_csd.partition_setting_completed = 1;
		else
			card->ext_csd.partition_setting_completed = 0;

		mmc_manage_enhanced_area(card, ext_csd);

		mmc_manage_gp_partitions(card, ext_csd);

		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];

		/*
		 * Note that the call to mmc_part_add above defaults to read
		 * only. If this default assumption is changed, the call must
		 * take into account the value of boot_locked below.
		 */
		card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
		card->ext_csd.boot_ro_lockable = true;

		/* Save power class values */
		card->ext_csd.raw_pwr_cl_52_195 =
			ext_csd[EXT_CSD_PWR_CL_52_195];
		card->ext_csd.raw_pwr_cl_26_195 =
			ext_csd[EXT_CSD_PWR_CL_26_195];
		card->ext_csd.raw_pwr_cl_52_360 =
			ext_csd[EXT_CSD_PWR_CL_52_360];
		card->ext_csd.raw_pwr_cl_26_360 =
			ext_csd[EXT_CSD_PWR_CL_26_360];
		card->ext_csd.raw_pwr_cl_200_195 =
			ext_csd[EXT_CSD_PWR_CL_200_195];
		card->ext_csd.raw_pwr_cl_200_360 =
			ext_csd[EXT_CSD_PWR_CL_200_360];
		card->ext_csd.raw_pwr_cl_ddr_52_195 =
			ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
		card->ext_csd.raw_pwr_cl_ddr_52_360 =
			ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
		card->ext_csd.raw_pwr_cl_ddr_200_360 =
			ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
	}

	if (card->ext_csd.rev >= 5) {
		/* Adjust production date as per JEDEC JESD84-B451 */
		if (card->cid.year < 2010)
			card->cid.year += 16;

		/* check whether the eMMC card supports BKOPS */
		if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
			card->ext_csd.bkops = 1;
			card->ext_csd.man_bkops_en =
					(ext_csd[EXT_CSD_BKOPS_EN] &
						EXT_CSD_MANUAL_BKOPS_MASK);
			card->ext_csd.raw_bkops_status =
				ext_csd[EXT_CSD_BKOPS_STATUS];
			if (card->ext_csd.man_bkops_en)
				pr_debug("%s: MAN_BKOPS_EN bit is set\n",
					mmc_hostname(card->host));
			card->ext_csd.auto_bkops_en =
					(ext_csd[EXT_CSD_BKOPS_EN] &
						EXT_CSD_AUTO_BKOPS_MASK);
			if (card->ext_csd.auto_bkops_en)
				pr_debug("%s: AUTO_BKOPS_EN bit is set\n",
					mmc_hostname(card->host));
		}

		/* check whether the eMMC card supports HPI */
		if (!mmc_card_broken_hpi(card) &&
		    !broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) {
			card->ext_csd.hpi = 1;
			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
				card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
			else
				card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
			/*
			 * Indicate the maximum timeout to close
			 * a command interrupted by HPI
			 */
			card->ext_csd.out_of_int_time =
				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		}

		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];

		/*
		 * RPMB regions are defined in multiples of 128K.
		 */
		card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
		if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
			mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
				EXT_CSD_PART_CONFIG_ACC_RPMB,
				"rpmb", 0, false,
				MMC_BLK_DATA_AREA_RPMB);
		}
	}

	card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;

	/* eMMC v4.5 or later */
	card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS;
	if (card->ext_csd.rev >= 6) {
		card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;

		card->ext_csd.generic_cmd6_time = 10 *
			ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
		card->ext_csd.power_off_longtime = 10 *
			ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];

		card->ext_csd.cache_size =
			ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
			ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
			ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
			ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;

		if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
			card->ext_csd.data_sector_size = 4096;
		else
			card->ext_csd.data_sector_size = 512;

		if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
		    (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
			card->ext_csd.data_tag_unit_size =
			((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
			(card->ext_csd.data_sector_size);
		} else {
			card->ext_csd.data_tag_unit_size = 0;
		}

		card->ext_csd.max_packed_writes =
			ext_csd[EXT_CSD_MAX_PACKED_WRITES];
		card->ext_csd.max_packed_reads =
			ext_csd[EXT_CSD_MAX_PACKED_READS];
	} else {
		card->ext_csd.data_sector_size = 512;
	}

	/* eMMC v5 or later */
	if (card->ext_csd.rev >= 7) {
		memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
		       MMC_FIRMWARE_LEN);
		card->ext_csd.ffu_capable =
			(ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
			!(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);

		card->ext_csd.pre_eol_info = ext_csd[EXT_CSD_PRE_EOL_INFO];
		card->ext_csd.device_life_time_est_typ_a =
			ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];
		card->ext_csd.device_life_time_est_typ_b =
			ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
	}

	/* eMMC v5.1 or later */
	if (card->ext_csd.rev >= 8) {
		card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT] &
					     EXT_CSD_CMDQ_SUPPORTED;
		card->ext_csd.cmdq_depth = (ext_csd[EXT_CSD_CMDQ_DEPTH] &
					    EXT_CSD_CMDQ_DEPTH_MASK) + 1;
		/* Exclude inefficiently small queue depths */
		if (card->ext_csd.cmdq_depth <= 2) {
			card->ext_csd.cmdq_support = false;
			card->ext_csd.cmdq_depth = 0;
		}
		if (card->ext_csd.cmdq_support) {
			pr_debug("%s: Command Queue supported depth %u\n",
				 mmc_hostname(card->host),
				 card->ext_csd.cmdq_depth);
		}
		card->ext_csd.enhanced_rpmb_supported =
			(card->ext_csd.rel_param &
			 EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR);
	}
out:
	return err;
}
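/*
 * Illustrative note (comment only, not kernel text): two of the decodes
 * above in concrete numbers. S_A_TIMEOUT = 0x11 gives
 * sa_timeout = 1 << 0x11 = 131072 units of 100ns, i.e. ~13.1 ms.
 * CMDQ_DEPTH with its low five bits (EXT_CSD_CMDQ_DEPTH_MASK) at 31
 * gives cmdq_depth = 31 + 1 = 32, the maximum eMMC 5.1 queue depth.
 */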
static int mmc_read_ext_csd(struct mmc_card *card)
{
	u8 *ext_csd;
	int err;

	if (!mmc_can_ext_csd(card))
		return 0;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err) {
		/* If the host or the card can't do the switch,
		 * fail more gracefully. */
		if ((err != -EINVAL)
		 && (err != -ENOSYS)
		 && (err != -EFAULT))
			return err;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			pr_err("%s: unable to read EXT_CSD on a possible high capacity card. Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
				mmc_hostname(card->host));
			err = 0;
		}

		return err;
	}

	err = mmc_decode_ext_csd(card, ext_csd);
	kfree(ext_csd);
	return err;
}
static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
{
	u8 *bw_ext_csd;
	int err;

	if (bus_width == MMC_BUS_WIDTH_1)
		return 0;

	err = mmc_get_ext_csd(card, &bw_ext_csd);
	if (err)
		return err;

	/* only compare read only fields */
	err = !((card->ext_csd.raw_partition_support ==
			bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
		(card->ext_csd.raw_erased_mem_count ==
			bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
		(card->ext_csd.rev ==
			bw_ext_csd[EXT_CSD_REV]) &&
		(card->ext_csd.raw_ext_csd_structure ==
			bw_ext_csd[EXT_CSD_STRUCTURE]) &&
		(card->ext_csd.raw_card_type ==
			bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
		(card->ext_csd.raw_s_a_timeout ==
			bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
		(card->ext_csd.raw_hc_erase_gap_size ==
			bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
		(card->ext_csd.raw_erase_timeout_mult ==
			bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
		(card->ext_csd.raw_hc_erase_grp_size ==
			bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
		(card->ext_csd.raw_sec_trim_mult ==
			bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
		(card->ext_csd.raw_sec_erase_mult ==
			bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
		(card->ext_csd.raw_sec_feature_support ==
			bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
		(card->ext_csd.raw_trim_mult ==
			bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
		(card->ext_csd.raw_sectors[0] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
		(card->ext_csd.raw_sectors[1] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
		(card->ext_csd.raw_sectors[2] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
		(card->ext_csd.raw_sectors[3] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
		(card->ext_csd.raw_pwr_cl_52_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
		(card->ext_csd.raw_pwr_cl_26_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
		(card->ext_csd.raw_pwr_cl_52_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
		(card->ext_csd.raw_pwr_cl_26_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
		(card->ext_csd.raw_pwr_cl_200_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
		(card->ext_csd.raw_pwr_cl_200_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
		(card->ext_csd.raw_pwr_cl_ddr_52_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
		(card->ext_csd.raw_pwr_cl_ddr_52_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
		(card->ext_csd.raw_pwr_cl_ddr_200_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));

	if (err)
		err = -EINVAL;

	kfree(bw_ext_csd);
	return err;
}
MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
	card->raw_cid[2], card->raw_cid[3]);
MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
	card->raw_csd[2], card->raw_csd[3]);
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info);
MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
	card->ext_csd.device_life_time_est_typ_a,
	card->ext_csd.device_life_time_est_typ_b);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
		card->ext_csd.enhanced_area_offset);
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
MMC_DEV_ATTR(enhanced_rpmb_supported, "%#x\n",
		card->ext_csd.enhanced_rpmb_supported);
MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
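/*
 * Illustrative note (comment only, not kernel text): these attributes
 * surface under sysfs once the card is added, e.g. as
 * /sys/bus/mmc/devices/mmc0:0001/manfid -- the "0001" part being the RCA
 * assigned during init. Reading an attribute returns the format string
 * above, so manfid prints as a 6-digit hex value prefixed with 0x.
 */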
static ssize_t mmc_fwrev_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mmc_card *card = mmc_dev_to_card(dev);

	if (card->ext_csd.rev < 7) {
		return sprintf(buf, "0x%x\n", card->cid.fwrev);
	} else {
		return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
			       card->ext_csd.fwrev);
	}
}

static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
static ssize_t mmc_dsr_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct mmc_card *card = mmc_dev_to_card(dev);
	struct mmc_host *host = card->host;

	if (card->csd.dsr_imp && host->dsr_req)
		return sprintf(buf, "0x%x\n", host->dsr);
	else
		/* return default DSR value */
		return sprintf(buf, "0x%x\n", 0x404);
}

static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
static struct attribute *mmc_std_attrs[] = {
	&dev_attr_cid.attr,
	&dev_attr_csd.attr,
	&dev_attr_date.attr,
	&dev_attr_erase_size.attr,
	&dev_attr_preferred_erase_size.attr,
	&dev_attr_fwrev.attr,
	&dev_attr_ffu_capable.attr,
	&dev_attr_hwrev.attr,
	&dev_attr_manfid.attr,
	&dev_attr_name.attr,
	&dev_attr_oemid.attr,
	&dev_attr_prv.attr,
	&dev_attr_rev.attr,
	&dev_attr_pre_eol_info.attr,
	&dev_attr_life_time.attr,
	&dev_attr_serial.attr,
	&dev_attr_enhanced_area_offset.attr,
	&dev_attr_enhanced_area_size.attr,
	&dev_attr_raw_rpmb_size_mult.attr,
	&dev_attr_enhanced_rpmb_supported.attr,
	&dev_attr_rel_sectors.attr,
	&dev_attr_ocr.attr,
	&dev_attr_rca.attr,
	&dev_attr_dsr.attr,
	&dev_attr_cmdq_en.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mmc_std);

static struct device_type mmc_type = {
	.groups = mmc_std_groups,
};
/*
 * Select the PowerClass for the current bus width
 * If power class is defined for 4/8 bit bus in the
 * extended CSD register, select it by executing the
 * mmc_switch command.
 */
static int __mmc_select_powerclass(struct mmc_card *card,
				   unsigned int bus_width)
{
	struct mmc_host *host = card->host;
	struct mmc_ext_csd *ext_csd = &card->ext_csd;
	unsigned int pwrclass_val = 0;
	int err = 0;

	switch (1 << host->ios.vdd) {
	case MMC_VDD_165_195:
		if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
			pwrclass_val = ext_csd->raw_pwr_cl_26_195;
		else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
			pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				ext_csd->raw_pwr_cl_52_195 :
				ext_csd->raw_pwr_cl_ddr_52_195;
		else if (host->ios.clock <= MMC_HS200_MAX_DTR)
			pwrclass_val = ext_csd->raw_pwr_cl_200_195;
		break;
	case MMC_VDD_27_28:
	case MMC_VDD_28_29:
	case MMC_VDD_29_30:
	case MMC_VDD_30_31:
	case MMC_VDD_31_32:
	case MMC_VDD_32_33:
	case MMC_VDD_33_34:
	case MMC_VDD_34_35:
	case MMC_VDD_35_36:
		if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
			pwrclass_val = ext_csd->raw_pwr_cl_26_360;
		else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
			pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				ext_csd->raw_pwr_cl_52_360 :
				ext_csd->raw_pwr_cl_ddr_52_360;
		else if (host->ios.clock <= MMC_HS200_MAX_DTR)
			pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
				ext_csd->raw_pwr_cl_ddr_200_360 :
				ext_csd->raw_pwr_cl_200_360;
		break;
	default:
		pr_warn("%s: Voltage range not supported for power class\n",
			mmc_hostname(host));
		return -EINVAL;
	}

	if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
				EXT_CSD_PWR_CL_8BIT_SHIFT;
	else
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
				EXT_CSD_PWR_CL_4BIT_SHIFT;

	/* If the power class is different from the default value */
	if (pwrclass_val > 0) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_CLASS,
				 pwrclass_val,
				 card->ext_csd.generic_cmd6_time);
	}

	return err;
}
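/*
 * Worked example (illustrative comment only, not kernel text): each power
 * class byte packs two classes. A raw value of 0x42 with
 * EXT_CSD_PWR_CL_8BIT_MASK (0xf0, shift 4) selects class 4 for 8-bit
 * buses, while EXT_CSD_PWR_CL_4BIT_MASK (0x0f, shift 0) selects class 2
 * for 4-bit buses; only a non-zero class triggers the CMD6 switch above.
 */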
static int mmc_select_powerclass(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 bus_width, ext_csd_bits;
	int err, ddr;

	/* Power class selection is supported for versions >= 4.0 */
	if (!mmc_can_ext_csd(card))
		return 0;

	bus_width = host->ios.bus_width;
	/* Power class values are defined only for 4/8 bit bus */
	if (bus_width == MMC_BUS_WIDTH_1)
		return 0;

	ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52;
	if (ddr)
		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
			EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
	else
		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
			EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;

	err = __mmc_select_powerclass(card, ext_csd_bits);
	if (err)
		pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
			mmc_hostname(host), 1 << bus_width, ddr);

	return err;
}
/*
 * Set the bus speed for the selected speed mode.
 */
static void mmc_set_bus_speed(struct mmc_card *card)
{
	unsigned int max_dtr = (unsigned int)-1;

	if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
	     max_dtr > card->ext_csd.hs200_max_dtr)
		max_dtr = card->ext_csd.hs200_max_dtr;
	else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
		max_dtr = card->ext_csd.hs_max_dtr;
	else if (max_dtr > card->csd.max_dtr)
		max_dtr = card->csd.max_dtr;

	mmc_set_clock(card->host, max_dtr);
}
/*
 * Select the bus width among 4-bit and 8-bit(SDR).
 * If the bus width is changed successfully, return the selected width value.
 * Zero is returned instead of error value if the wide width is not supported.
 */
static int mmc_select_bus_width(struct mmc_card *card)
{
	static unsigned ext_csd_bits[] = {
		EXT_CSD_BUS_WIDTH_8,
		EXT_CSD_BUS_WIDTH_4,
	};
	static unsigned bus_widths[] = {
		MMC_BUS_WIDTH_8,
		MMC_BUS_WIDTH_4,
	};
	struct mmc_host *host = card->host;
	unsigned idx, bus_width = 0;
	int err = 0;

	if (!mmc_can_ext_csd(card) ||
	    !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
		return 0;

	idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;

	/*
	 * Unlike SD, MMC cards don't have a configuration register to notify
	 * supported bus width. So bus test command should be run to identify
	 * the supported bus width or compare the ext csd values of current
	 * bus width and ext csd values of 1 bit mode read earlier.
	 */
	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
		/*
		 * If the host is capable of 8bit transfer, switch
		 * the device to work in 8bit transfer mode. If the
		 * mmc switch command returns error then switch to
		 * 4bit transfer mode. On success set the corresponding
		 * bus width on the host.
		 */
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH,
				 ext_csd_bits[idx],
				 card->ext_csd.generic_cmd6_time);
		if (err)
			continue;

		bus_width = bus_widths[idx];
		mmc_set_bus_width(host, bus_width);

		/*
		 * If controller can't handle bus width test,
		 * compare ext_csd previously read in 1 bit mode
		 * against ext_csd at new bus width
		 */
		if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
			err = mmc_compare_ext_csds(card, bus_width);
		else
			err = mmc_bus_test(card, bus_width);

		if (!err) {
			err = bus_width;
			break;
		} else {
			pr_warn("%s: switch to bus width %d failed\n",
				mmc_hostname(host), 1 << bus_width);
		}
	}

	return err;
}
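/*
 * Illustrative note (comment only, not kernel text): on success this
 * returns the MMC_BUS_WIDTH_* constant, which is the log2 of the number
 * of data lines (MMC_BUS_WIDTH_8 == 3, hence the "1 << bus_width" in the
 * log message printing 8). Callers such as mmc_select_hs200() below
 * therefore test for "err > 0" to distinguish a wide bus from the 0
 * returned when only 1-bit operation is possible.
 */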
/*
 * Switch to the high-speed mode
 */
static int mmc_select_hs(struct mmc_card *card)
{
	int err;

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
			   card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
			   true, true);
	if (err)
		pr_warn("%s: switch to high-speed failed, err:%d\n",
			mmc_hostname(card->host), err);

	return err;
}
/*
 * Activate wide bus and DDR if supported.
 */
static int mmc_select_hs_ddr(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 bus_width, ext_csd_bits;
	int err = 0;

	if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
		return 0;

	bus_width = host->ios.bus_width;
	if (bus_width == MMC_BUS_WIDTH_1)
		return 0;

	ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
		EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_BUS_WIDTH,
			   ext_csd_bits,
			   card->ext_csd.generic_cmd6_time,
			   MMC_TIMING_MMC_DDR52,
			   true, true);
	if (err) {
		pr_err("%s: switch to bus width %d ddr failed\n",
			mmc_hostname(host), 1 << bus_width);
		return err;
	}

	/*
	 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
	 * signaling.
	 *
	 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
	 *
	 * 1.8V vccq at 3.3V core voltage (vcc) is not required
	 * in the JEDEC spec for DDR.
	 *
	 * Even though (e)MMC cards can support 3.3V to 1.2V vccq, not every
	 * host controller can support this, like some of the SDHCI
	 * controllers which connect to an eMMC device. Some of these
	 * host controllers still need to use 1.8V vccq for supporting
	 * DDR mode.
	 *
	 * So the sequence will be:
	 * if (host and device can both support 1.2v IO)
	 *	use 1.2v IO;
	 * else if (host and device can both support 1.8v IO)
	 *	use 1.8v IO;
	 * so if host and device can only support 3.3v IO, this is the
	 * last choice.
	 *
	 * WARNING: eMMC rules are NOT the same as SD DDR
	 */
	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
		if (!err)
			return 0;
	}

	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V &&
	    host->caps & MMC_CAP_1_8V_DDR)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);

	/* make sure vccq is 3.3v after switching disaster */
	if (err)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);

	return err;
}
static int mmc_select_hs400(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_dtr;
	int err = 0;
	u8 val;

	/*
	 * HS400 mode requires 8-bit bus width
	 */
	if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
	      host->ios.bus_width == MMC_BUS_WIDTH_8))
		return 0;

	/* Switch card to HS mode */
	val = EXT_CSD_TIMING_HS;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, val,
			   card->ext_csd.generic_cmd6_time, 0,
			   false, true);
	if (err) {
		pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
			mmc_hostname(host), err);
		return err;
	}

	/* Prepare host to downgrade to HS timing */
	if (host->ops->hs400_downgrade)
		host->ops->hs400_downgrade(host);

	/* Set host controller to HS timing */
	mmc_set_timing(host, MMC_TIMING_MMC_HS);

	/* Reduce frequency to HS frequency */
	max_dtr = card->ext_csd.hs_max_dtr;
	mmc_set_clock(host, max_dtr);

	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;

	if (host->ops->hs400_prepare_ddr)
		host->ops->hs400_prepare_ddr(host);

	/* Switch card to DDR */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH,
			 EXT_CSD_DDR_BUS_WIDTH_8,
			 card->ext_csd.generic_cmd6_time);
	if (err) {
		pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
			mmc_hostname(host), err);
		return err;
	}

	/* Switch card to HS400 */
	val = EXT_CSD_TIMING_HS400 |
	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, val,
			   card->ext_csd.generic_cmd6_time, 0,
			   false, true);
	if (err) {
		pr_err("%s: switch to hs400 failed, err:%d\n",
			mmc_hostname(host), err);
		return err;
	}

	/* Set host controller to HS400 timing and frequency */
	mmc_set_timing(host, MMC_TIMING_MMC_HS400);
	mmc_set_bus_speed(card);

	if (host->ops->hs400_complete)
		host->ops->hs400_complete(host);

	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;

	return 0;

out_err:
	pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
	       __func__, err);
	return err;
}
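/*
 * Illustrative summary of the HS400 upgrade ladder above (comment only,
 * not kernel text): the card cannot jump from HS200 straight to HS400,
 * so the sequence is HS200 -> drop to HS timing at hs_max_dtr -> switch
 * the bus to 8-bit DDR -> raise HS_TIMING to HS400 -> restore the full
 * HS200/HS400 clock, with a CMD13 status check (mmc_switch_status) after
 * each leg.
 */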
int mmc_hs200_to_hs400(struct mmc_card *card)
{
	return mmc_select_hs400(card);
}

int mmc_hs400_to_hs200(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_dtr;
	int err;
	u8 val;

	/* Reduce frequency to HS */
	max_dtr = card->ext_csd.hs_max_dtr;
	mmc_set_clock(host, max_dtr);

	/* Switch HS400 to HS DDR */
	val = EXT_CSD_TIMING_HS;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   val, card->ext_csd.generic_cmd6_time, 0,
			   false, true);
	if (err)
		goto out_err;

	if (host->ops->hs400_downgrade)
		host->ops->hs400_downgrade(host);

	mmc_set_timing(host, MMC_TIMING_MMC_DDR52);

	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;

	/* Switch HS DDR to HS */
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			   EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
			   0, false, true);
	if (err)
		goto out_err;

	mmc_set_timing(host, MMC_TIMING_MMC_HS);

	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;

	/* Switch HS to HS200 */
	val = EXT_CSD_TIMING_HS200 |
	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   val, card->ext_csd.generic_cmd6_time, 0,
			   false, true);
	if (err)
		goto out_err;

	mmc_set_timing(host, MMC_TIMING_MMC_HS200);

	/*
	 * For HS200, CRC errors are not a reliable way to know the switch
	 * failed. If there really is a problem, we would expect tuning will
	 * fail and the result ends up the same.
	 */
	err = mmc_switch_status(card, false);
	if (err)
		goto out_err;

	mmc_set_bus_speed(card);

	/* Prepare tuning for HS400 mode. */
	if (host->ops->prepare_hs400_tuning)
		host->ops->prepare_hs400_tuning(host, &host->ios);

	return 0;

out_err:
	pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
	       __func__, err);
	return err;
}
static void mmc_select_driver_type(struct mmc_card *card)
{
	int card_drv_type, drive_strength, drv_type = 0;
	int fixed_drv_type = card->host->fixed_drv_type;

	card_drv_type = card->ext_csd.raw_driver_strength |
			mmc_driver_type_mask(0);

	if (fixed_drv_type >= 0)
		drive_strength = card_drv_type & mmc_driver_type_mask(fixed_drv_type)
				 ? fixed_drv_type : 0;
	else
		drive_strength = mmc_select_drive_strength(card,
							   card->ext_csd.hs200_max_dtr,
							   card_drv_type, &drv_type);

	card->drive_strength = drive_strength;

	if (drv_type)
		mmc_set_driver_type(card->host, drv_type);
}
static int mmc_select_hs400es(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = -EINVAL;
	u8 val;

	if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
		err = -ENOTSUPP;
		goto out_err;
	}

	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);

	if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);

	/* If it fails, try again during the next card power cycle */
	if (err)
		goto out_err;

	err = mmc_select_bus_width(card);
	if (err != MMC_BUS_WIDTH_8) {
		pr_err("%s: switch to 8bit bus width failed, err:%d\n",
			mmc_hostname(host), err);
		err = err < 0 ? err : -ENOTSUPP;
		goto out_err;
	}

	/* Switch card to HS mode */
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
			   card->ext_csd.generic_cmd6_time, 0,
			   false, true);
	if (err) {
		pr_err("%s: switch to hs for hs400es failed, err:%d\n",
			mmc_hostname(host), err);
		goto out_err;
	}

	mmc_set_timing(host, MMC_TIMING_MMC_HS);
	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;

	mmc_set_clock(host, card->ext_csd.hs_max_dtr);

	/* Switch card to DDR with strobe bit */
	val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH,
			 val,
			 card->ext_csd.generic_cmd6_time);
	if (err) {
		pr_err("%s: switch to bus width for hs400es failed, err:%d\n",
			mmc_hostname(host), err);
		goto out_err;
	}

	mmc_select_driver_type(card);

	/* Switch card to HS400 */
	val = EXT_CSD_TIMING_HS400 |
	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, val,
			   card->ext_csd.generic_cmd6_time, 0,
			   false, true);
	if (err) {
		pr_err("%s: switch to hs400es failed, err:%d\n",
			mmc_hostname(host), err);
		goto out_err;
	}

	/* Set host controller to HS400 timing and frequency */
	mmc_set_timing(host, MMC_TIMING_MMC_HS400);

	/* Controller enable enhanced strobe function */
	host->ios.enhanced_strobe = true;
	if (host->ops->hs400_enhanced_strobe)
		host->ops->hs400_enhanced_strobe(host, &host->ios);

	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;

	return 0;

out_err:
	pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
	       __func__, err);
	return err;
}
/*
 * For device supporting HS200 mode, the following sequence
 * should be done before executing the tuning process.
 * 1. set the desired bus width(4-bit or 8-bit, 1-bit is not supported)
 * 2. switch to HS200 mode
 * 3. set the clock to > 52Mhz and <=200MHz
 */
static int mmc_select_hs200(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int old_timing, old_signal_voltage;
	int err = -EINVAL;
	u8 val;

	old_signal_voltage = host->ios.signal_voltage;
	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);

	if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);

	/* If it fails, try again during the next card power cycle */
	if (err)
		return err;

	mmc_select_driver_type(card);

	/*
	 * Set the bus width(4 or 8) with host's support and
	 * switch to HS200 mode if bus width is set successfully.
	 */
	err = mmc_select_bus_width(card);
	if (err > 0) {
		val = EXT_CSD_TIMING_HS200 |
		      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
		err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_HS_TIMING, val,
				   card->ext_csd.generic_cmd6_time, 0,
				   false, true);
		if (err)
			goto err;
		old_timing = host->ios.timing;
		mmc_set_timing(host, MMC_TIMING_MMC_HS200);

		/*
		 * For HS200, CRC errors are not a reliable way to know the
		 * switch failed. If there really is a problem, we would expect
		 * tuning will fail and the result ends up the same.
		 */
		err = mmc_switch_status(card, false);

		/*
		 * mmc_select_timing() assumes timing has not changed if
		 * it is a switch error.
		 */
		if (err == -EBADMSG)
			mmc_set_timing(host, old_timing);
	}
err:
	if (err) {
		/* fall back to the old signal voltage, if fails report error */
		if (mmc_set_signal_voltage(host, old_signal_voltage))
			err = -EIO;

		pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
		       __func__, err);
	}
	return err;
}
/*
 * Activate High Speed, HS200 or HS400ES mode if supported.
 */
static int mmc_select_timing(struct mmc_card *card)
{
	int err = 0;

	if (!mmc_can_ext_csd(card))
		goto bus_speed;

	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES)
		err = mmc_select_hs400es(card);
	else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
		err = mmc_select_hs200(card);
	else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
		err = mmc_select_hs(card);

	if (err && err != -EBADMSG)
		return err;

bus_speed:
	/*
	 * Set the bus speed to the selected bus timing.
	 * If timing is not selected, backward compatible is the default.
	 */
	mmc_set_bus_speed(card);
	return 0;
}
/*
 * Execute tuning sequence to seek the proper bus operating
 * conditions for HS200 and HS400, which sends CMD21 to the device.
 */
static int mmc_hs200_tuning(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	/*
	 * Timing should be adjusted to the HS400 target
	 * operation frequency for tuning process
	 */
	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
	    host->ios.bus_width == MMC_BUS_WIDTH_8)
		if (host->ops->prepare_hs400_tuning)
			host->ops->prepare_hs400_tuning(host, &host->ios);

	return mmc_execute_tuning(card);
}
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err;
	u32 cid[4];
	u32 rocr;

	WARN_ON(!host->claimed);

	/* Set correct bus mode for MMC before attempting init */
	if (!mmc_host_is_spi(host))
		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 * mmc_go_idle is needed for eMMC that are asleep
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	err = mmc_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			pr_debug("%s: Perhaps the card was replaced\n",
				mmc_hostname(host));
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->ocr = ocr;
		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * Call the optional HC's init_card function to handle quirks.
	 */
	if (host->ops->init_card)
		host->ops->init_card(host, card);

	/*
	 * For native busses:  set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * handling only for cards supporting DSR and hosts requesting
	 * DSR configuration
	 */
	if (card->csd.dsr_imp && host->dsr_req)
		mmc_set_dsr(host);

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/* Read extended CSD. */
		err = mmc_read_ext_csd(card);
		if (err)
			goto free_card;

		/*
		 * If doing byte addressing, check if required to do sector
		 * addressing.  Handle the case of <2GB cards needing sector
		 * addressing.  See section 8.1 JEDEC Standard JED84-A441;
		 * ocr register has bit 30 set for sector addressing.
		 */
		if (rocr & BIT(30))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);
	}

	/* Enable ERASE_GRP_DEF. This bit is lost after a reset or power off. */
	if (card->ext_csd.rev >= 3) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1,
				 card->ext_csd.generic_cmd6_time);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Just disable enhanced area off & sz
			 * will try to enable ERASE_GROUP_DEF
			 * during next time reinit
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		} else {
			card->ext_csd.erase_group_def = 1;
			/*
			 * enable ERASE_GRP_DEF successfully.
			 * This will affect the erase size, so
			 * here need to reset erase size
			 */
			mmc_set_erase_size(card);
		}
	}

	/*
	 * Ensure eMMC user default partition is enabled
	 */
	if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
				 card->ext_csd.part_config,
				 card->ext_csd.part_time);
		if (err && err != -EBADMSG)
			goto free_card;
	}

	/*
	 * Enable power_off_notification byte in the ext_csd register
	 */
	if (card->ext_csd.rev >= 6) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_OFF_NOTIFICATION,
				 EXT_CSD_POWER_ON,
				 card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;

		/*
		 * The err can be -EBADMSG or 0,
		 * so check for success and update the flag
		 */
		if (!err)
			card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
	}

	/* set erase_arg */
	if (mmc_can_discard(card))
		card->erase_arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		card->erase_arg = MMC_TRIM_ARG;
	else
		card->erase_arg = MMC_ERASE_ARG;

	/*
	 * Select timing interface
	 */
	err = mmc_select_timing(card);
	if (err)
		goto free_card;

	if (mmc_card_hs200(card)) {
		host->doing_init_tune = 1;

		err = mmc_hs200_tuning(card);
		if (!err)
			err = mmc_select_hs400(card);

		host->doing_init_tune = 0;

		if (err)
			goto free_card;

	} else if (!mmc_card_hs400es(card)) {
		/* Select the desired bus width optionally */
		err = mmc_select_bus_width(card);
		if (err > 0 && mmc_card_hs(card)) {
			err = mmc_select_hs_ddr(card);
			if (err)
				goto free_card;
		}
	}

	/*
	 * Choose the power class with selected bus interface
	 */
	mmc_select_powerclass(card);

	/*
	 * Enable HPI feature (if supported)
	 */
	if (card->ext_csd.hpi) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HPI_MGMT, 1,
				 card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warn("%s: Enabling HPI failed\n",
				mmc_hostname(card->host));
			card->ext_csd.hpi_en = 0;
			err = 0;
		} else {
			card->ext_csd.hpi_en = 1;
		}
	}

	/*
	 * If cache size is higher than 0, this indicates the existence of cache
	 * and it can be turned on. Note that some eMMCs from Micron have been
	 * reported to need ~800 ms timeout, while enabling the cache after
	 * sudden power failure tests. Let's extend the timeout to a minimum of
	 * MIN_CACHE_EN_TIMEOUT_MS and do it for all cards.
	 */
	if (card->ext_csd.cache_size > 0) {
		unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;

		timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_CACHE_CTRL, 1, timeout_ms);
		if (err && err != -EBADMSG)
			goto free_card;

		/*
		 * Only if no error, cache is turned on successfully.
		 */
		if (err) {
			pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
				mmc_hostname(card->host), err);
			card->ext_csd.cache_ctrl = 0;
			err = 0;
		} else {
			card->ext_csd.cache_ctrl = 1;
		}
	}

	/*
	 * Enable Command Queue if supported. Note that Packed Commands cannot
	 * be used with Command Queue.
	 */
	card->ext_csd.cmdq_en = false;
	if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) {
		err = mmc_cmdq_enable(card);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warn("%s: Enabling CMDQ failed\n",
				mmc_hostname(card->host));
			card->ext_csd.cmdq_support = false;
			card->ext_csd.cmdq_depth = 0;
			err = 0;
		}
	}
	/*
	 * In some cases (e.g. RPMB or mmc_test), the Command Queue must be
	 * disabled for a time, so a flag is needed to indicate to re-enable the
	 * Command Queue.
	 */
	card->reenable_cmdq = card->ext_csd.cmdq_en;

	if (host->cqe_ops && !host->cqe_enabled) {
		err = host->cqe_ops->cqe_enable(host, card);
		if (!err) {
			host->cqe_enabled = true;

			if (card->ext_csd.cmdq_en) {
				pr_info("%s: Command Queue Engine enabled\n",
					mmc_hostname(host));
			} else {
				host->hsq_enabled = true;
				pr_info("%s: Host Software Queue enabled\n",
					mmc_hostname(host));
			}
		}
	}

	if (host->caps2 & MMC_CAP2_AVOID_3_3V &&
	    host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		pr_err("%s: Host failed to negotiate down from 3.3V\n",
			mmc_hostname(host));
		err = -EINVAL;
		goto free_card;
	}

	if (!oldcard)
		host->card = card;

	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:
	return err;
}
static int mmc_can_sleep(struct mmc_card *card)
{
	return (card && card->ext_csd.rev >= 3);
}

static int mmc_sleep(struct mmc_host *host)
{
	struct mmc_command cmd = {};
	struct mmc_card *card = host->card;
	unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
	int err;

	/* Re-tuning can't be done once the card is deselected */
	mmc_retune_hold(host);

	err = mmc_deselect_cards(host);
	if (err)
		goto out_release;

	cmd.opcode = MMC_SLEEP_AWAKE;
	cmd.arg = card->rca << 16;
	cmd.arg |= 1 << 15;

	/*
	 * If the max_busy_timeout of the host is specified, validate it against
	 * the sleep cmd timeout. A failure means we need to prevent the host
	 * from doing hw busy detection, which is done by converting to a R1
	 * response instead of a R1B. Note, some hosts require R1B, which also
	 * means they are on their own when it comes to dealing with the busy
	 * timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout)) {
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = timeout_ms;
	}

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		goto out_release;

	/*
	 * If the host does not wait while the card signals busy, then we will
	 * have to wait the sleep/awake timeout.  Note, we cannot use the
	 * SEND_STATUS command to poll the status because that command (and most
	 * others) is invalid while the card sleeps.
	 */
	if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
		mmc_delay(timeout_ms);

out_release:
	mmc_retune_release(host);
	return err;
}
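/*
 * Worked example (illustrative comment only, not kernel text): sa_timeout
 * is in 100ns units, so the DIV_ROUND_UP(..., 10000) above converts it to
 * milliseconds (10000 * 100ns = 1ms). For S_A_TIMEOUT = 0x11,
 * sa_timeout = 131072, giving timeout_ms = 14; a host whose
 * max_busy_timeout is shorter than that falls back to an R1 response and
 * an explicit mmc_delay().
 */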
static int mmc_can_poweroff_notify(const struct mmc_card *card)
{
	return card &&
		mmc_card_mmc(card) &&
		(card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
}

static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
{
	unsigned int timeout = card->ext_csd.generic_cmd6_time;
	int err;

	/* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
	if (notify_type == EXT_CSD_POWER_OFF_LONG)
		timeout = card->ext_csd.power_off_longtime;

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_POWER_OFF_NOTIFICATION,
			notify_type, timeout, 0, false, false);
	if (err)
		pr_err("%s: Power Off Notification timed out, %u\n",
		       mmc_hostname(card->host), timeout);

	/* Disable the power off notification after the switch operation. */
	card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;

	return err;
}
/*
 * Host is being removed. Free up the current card.
 */
static void mmc_remove(struct mmc_host *host)
{
	mmc_remove_card(host->card);
	host->card = NULL;
}

/*
 * Card detection - card is alive.
 */
static int mmc_alive(struct mmc_host *host)
{
	return mmc_send_status(host->card, NULL);
}

/*
 * Card detection callback from host.
 */
static void mmc_detect(struct mmc_host *host)
{
	int err;

	mmc_get_card(host->card, NULL);

	/*
	 * Just check if our card has been removed.
	 */
	err = _mmc_detect_card_removed(host);

	mmc_put_card(host->card, NULL);

	if (err) {
		mmc_remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
	}
}
static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
{
	int err = 0;
	unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
					EXT_CSD_POWER_OFF_LONG;

	mmc_claim_host(host);

	if (mmc_card_suspended(host->card))
		goto out;

	err = mmc_flush_cache(host->card);
	if (err)
		goto out;

	if (mmc_can_poweroff_notify(host->card) &&
	    ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend ||
	     (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND)))
		err = mmc_poweroff_notify(host->card, notify_type);
	else if (mmc_can_sleep(host->card))
		err = mmc_sleep(host);
	else if (!mmc_host_is_spi(host))
		err = mmc_deselect_cards(host);

	if (!err) {
		mmc_power_off(host);
		mmc_card_set_suspended(host->card);
	}
out:
	mmc_release_host(host);
	return err;
}
/*
 * Suspend callback
 */
static int mmc_suspend(struct mmc_host *host)
{
	int err;

	err = _mmc_suspend(host, true);
	if (!err) {
		pm_runtime_disable(&host->card->dev);
		pm_runtime_set_suspended(&host->card->dev);
	}

	return err;
}
/*
 * This function tries to determine if the same card is still present
 * and, if so, restore all state to it.
 */
static int _mmc_resume(struct mmc_host *host)
{
	int err = 0;

	mmc_claim_host(host);

	if (!mmc_card_suspended(host->card))
		goto out;

	mmc_power_up(host, host->card->ocr);
	err = mmc_init_card(host, host->card->ocr, host->card);
	mmc_card_clr_suspended(host->card);

out:
	mmc_release_host(host);
	return err;
}
/*
 * Shutdown callback
 */
static int mmc_shutdown(struct mmc_host *host)
{
	int err = 0;

	/*
	 * In a specific case for poweroff notify, we need to resume the card
	 * before we can shut it down properly.
	 */
	if (mmc_can_poweroff_notify(host->card) &&
		!(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
		err = _mmc_resume(host);

	if (!err)
		err = _mmc_suspend(host, false);

	return err;
}
/*
 * Callback for resume.
 */
static int mmc_resume(struct mmc_host *host)
{
	pm_runtime_enable(&host->card->dev);
	return 0;
}
/*
 * Callback for runtime_suspend.
 */
static int mmc_runtime_suspend(struct mmc_host *host)
{
	int err;

	if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
		return 0;

	err = _mmc_suspend(host, true);
	if (err)
		pr_err("%s: error %d doing aggressive suspend\n",
			mmc_hostname(host), err);

	return err;
}
/*
 * Callback for runtime_resume.
 */
static int mmc_runtime_resume(struct mmc_host *host)
{
	int err;

	err = _mmc_resume(host);
	if (err && err != -ENOMEDIUM)
		pr_err("%s: error %d doing runtime resume\n",
			mmc_hostname(host), err);

	return 0;
}
static int mmc_can_reset(struct mmc_card *card)
{
	u8 rst_n_function;

	rst_n_function = card->ext_csd.rst_n_function;
	if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
		return 0;
	return 1;
}

static int _mmc_hw_reset(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	/*
	 * In the case of recovery, we can't expect flushing the cache to work
	 * always, but we have a go and ignore errors.
	 */
	mmc_flush_cache(host->card);

	if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
	     mmc_can_reset(card)) {
		/* If the card accepts the RST_n signal, send it. */
		mmc_set_clock(host, host->f_init);
		host->ops->hw_reset(host);
		/* Set initial state and call mmc_set_ios */
		mmc_set_initial_state(host);
	} else {
		/* Do a brute force power cycle */
		mmc_power_cycle(host, card->ocr);
		mmc_pwrseq_reset(host);
	}
	return mmc_init_card(host, card->ocr, card);
}
static const struct mmc_bus_ops mmc_ops = {
	.remove = mmc_remove,
	.detect = mmc_detect,
	.suspend = mmc_suspend,
	.resume = mmc_resume,
	.runtime_suspend = mmc_runtime_suspend,
	.runtime_resume = mmc_runtime_resume,
	.alive = mmc_alive,
	.shutdown = mmc_shutdown,
	.hw_reset = _mmc_hw_reset,
};
/*
 * Starting point for MMC card init.
 */
int mmc_attach_mmc(struct mmc_host *host)
{
	int err;
	u32 ocr, rocr;

	WARN_ON(!host->claimed);

	/* Set correct bus mode for MMC before attempting attach */
	if (!mmc_host_is_spi(host))
		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);

	err = mmc_send_op_cond(host, 0, &ocr);
	if (err)
		return err;

	mmc_attach_bus(host, &mmc_ops);
	if (host->ocr_avail_mmc)
		host->ocr_avail = host->ocr_avail_mmc;

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_read_ocr(host, 1, &ocr);
		if (err)
			goto err;
	}

	rocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage of the card?
	 */
	if (!rocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
	err = mmc_init_card(host, rocr, NULL);
	if (err)
		goto err;

	mmc_release_host(host);
	err = mmc_add_card(host->card);
	if (err)
		goto remove_card;

	mmc_claim_host(host);
	return 0;

remove_card:
	mmc_remove_card(host->card);
	mmc_claim_host(host);
	host->card = NULL;
err:
	mmc_detach_bus(host);

	pr_err("%s: error %d whilst initialising MMC card\n",
		mmc_hostname(host), err);

	return err;
}
);