/*
 * SD Association Host Standard Specification v2.0 controller emulation
 *
 * Datasheet: PartA2_SD_Host_Controller_Simplified_Specification_Ver2.00.pdf
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Mitsyanko Igor <i.mitsyanko@samsung.com>
 * Peter A.G. Crosthwaite <peter.crosthwaite@petalogix.com>
 *
 * Based on MMC controller for Samsung S5PC1xx-based board emulation
 * by Alexey Merkulov and Vladimir Monakhov.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "sysemu/dma.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "hw/sd/sdhci.h"
#include "migration/vmstate.h"
#include "sdhci-internal.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "trace.h"
#include "qom/object.h"
#define TYPE_SDHCI_BUS "sdhci-bus"
/* This is reusing the SDBus typedef from SD_BUS */
DECLARE_INSTANCE_CHECKER(SDBus, SDHCI_BUS,
                         TYPE_SDHCI_BUS)
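
/*
 * Read-modify-write helper: 'mask' has zeros at the byte lanes being
 * written, so (reg & mask) keeps the untouched bytes and 'val' (already
 * shifted into position by the caller) supplies the new ones.
 */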
#define MASKED_WRITE(reg, mask, val) (reg = (reg & (mask)) | (val))
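
/*
 * CAPAB.MAXBLOCKLENGTH encodes the largest supported block size:
 * 0 -> 512, 1 -> 1024, 2 -> 2048 bytes, i.e. 512 << field.
 */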
static inline unsigned int sdhci_get_fifolen(SDHCIState *s)
{
    return 1 << (9 + FIELD_EX32(s->capareg, SDHC_CAPAB, MAXBLOCKLENGTH));
}
/* return true on error */
static bool sdhci_check_capab_freq_range(SDHCIState *s, const char *desc,
                                         uint8_t freq, Error **errp)
{
    if (s->sd_spec_version >= 3) {
        return false;
    }

    switch (freq) {
    case 0:
    case 10 ... 63:
        break;
    default:
        error_setg(errp, "SD %s clock frequency can have value "
                   "in range 0-63 only", desc);
        return true;
    }
    return false;
}
75 static void sdhci_check_capareg(SDHCIState *s, Error **errp)
77 uint64_t msk = s->capareg;
78 uint32_t val;
79 bool y;
81 switch (s->sd_spec_version) {
82 case 4:
83 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS64BIT_V4);
84 trace_sdhci_capareg("64-bit system bus (v4)", val);
85 msk = FIELD_DP64(msk, SDHC_CAPAB, BUS64BIT_V4, 0);
87 val = FIELD_EX64(s->capareg, SDHC_CAPAB, UHS_II);
88 trace_sdhci_capareg("UHS-II", val);
89 msk = FIELD_DP64(msk, SDHC_CAPAB, UHS_II, 0);
91 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA3);
92 trace_sdhci_capareg("ADMA3", val);
93 msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA3, 0);
95 /* fallthrough */
96 case 3:
97 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ASYNC_INT);
98 trace_sdhci_capareg("async interrupt", val);
99 msk = FIELD_DP64(msk, SDHC_CAPAB, ASYNC_INT, 0);
101 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SLOT_TYPE);
102 if (val) {
103 error_setg(errp, "slot-type not supported");
104 return;
106 trace_sdhci_capareg("slot type", val);
107 msk = FIELD_DP64(msk, SDHC_CAPAB, SLOT_TYPE, 0);
109 if (val != 2) {
110 val = FIELD_EX64(s->capareg, SDHC_CAPAB, EMBEDDED_8BIT);
111 trace_sdhci_capareg("8-bit bus", val);
113 msk = FIELD_DP64(msk, SDHC_CAPAB, EMBEDDED_8BIT, 0);
115 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS_SPEED);
116 trace_sdhci_capareg("bus speed mask", val);
117 msk = FIELD_DP64(msk, SDHC_CAPAB, BUS_SPEED, 0);
119 val = FIELD_EX64(s->capareg, SDHC_CAPAB, DRIVER_STRENGTH);
120 trace_sdhci_capareg("driver strength mask", val);
121 msk = FIELD_DP64(msk, SDHC_CAPAB, DRIVER_STRENGTH, 0);
123 val = FIELD_EX64(s->capareg, SDHC_CAPAB, TIMER_RETUNING);
124 trace_sdhci_capareg("timer re-tuning", val);
125 msk = FIELD_DP64(msk, SDHC_CAPAB, TIMER_RETUNING, 0);
127 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SDR50_TUNING);
128 trace_sdhci_capareg("use SDR50 tuning", val);
129 msk = FIELD_DP64(msk, SDHC_CAPAB, SDR50_TUNING, 0);
131 val = FIELD_EX64(s->capareg, SDHC_CAPAB, RETUNING_MODE);
132 trace_sdhci_capareg("re-tuning mode", val);
133 msk = FIELD_DP64(msk, SDHC_CAPAB, RETUNING_MODE, 0);
135 val = FIELD_EX64(s->capareg, SDHC_CAPAB, CLOCK_MULT);
136 trace_sdhci_capareg("clock multiplier", val);
137 msk = FIELD_DP64(msk, SDHC_CAPAB, CLOCK_MULT, 0);
139 /* fallthrough */
140 case 2: /* default version */
141 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA2);
142 trace_sdhci_capareg("ADMA2", val);
143 msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA2, 0);
145 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA1);
146 trace_sdhci_capareg("ADMA1", val);
147 msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA1, 0);
149 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS64BIT);
150 trace_sdhci_capareg("64-bit system bus (v3)", val);
151 msk = FIELD_DP64(msk, SDHC_CAPAB, BUS64BIT, 0);
153 /* fallthrough */
154 case 1:
155 y = FIELD_EX64(s->capareg, SDHC_CAPAB, TOUNIT);
156 msk = FIELD_DP64(msk, SDHC_CAPAB, TOUNIT, 0);
158 val = FIELD_EX64(s->capareg, SDHC_CAPAB, TOCLKFREQ);
        trace_sdhci_capareg(y ? "timeout (MHz)" : "timeout (KHz)", val);
160 if (sdhci_check_capab_freq_range(s, "timeout", val, errp)) {
161 return;
163 msk = FIELD_DP64(msk, SDHC_CAPAB, TOCLKFREQ, 0);
165 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BASECLKFREQ);
        trace_sdhci_capareg(y ? "base (MHz)" : "base (KHz)", val);
167 if (sdhci_check_capab_freq_range(s, "base", val, errp)) {
168 return;
170 msk = FIELD_DP64(msk, SDHC_CAPAB, BASECLKFREQ, 0);
172 val = FIELD_EX64(s->capareg, SDHC_CAPAB, MAXBLOCKLENGTH);
173 if (val >= 3) {
174 error_setg(errp, "block size can be 512, 1024 or 2048 only");
175 return;
177 trace_sdhci_capareg("max block length", sdhci_get_fifolen(s));
178 msk = FIELD_DP64(msk, SDHC_CAPAB, MAXBLOCKLENGTH, 0);
180 val = FIELD_EX64(s->capareg, SDHC_CAPAB, HIGHSPEED);
181 trace_sdhci_capareg("high speed", val);
182 msk = FIELD_DP64(msk, SDHC_CAPAB, HIGHSPEED, 0);
184 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SDMA);
185 trace_sdhci_capareg("SDMA", val);
186 msk = FIELD_DP64(msk, SDHC_CAPAB, SDMA, 0);
188 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SUSPRESUME);
189 trace_sdhci_capareg("suspend/resume", val);
190 msk = FIELD_DP64(msk, SDHC_CAPAB, SUSPRESUME, 0);
192 val = FIELD_EX64(s->capareg, SDHC_CAPAB, V33);
193 trace_sdhci_capareg("3.3v", val);
194 msk = FIELD_DP64(msk, SDHC_CAPAB, V33, 0);
196 val = FIELD_EX64(s->capareg, SDHC_CAPAB, V30);
197 trace_sdhci_capareg("3.0v", val);
198 msk = FIELD_DP64(msk, SDHC_CAPAB, V30, 0);
200 val = FIELD_EX64(s->capareg, SDHC_CAPAB, V18);
201 trace_sdhci_capareg("1.8v", val);
202 msk = FIELD_DP64(msk, SDHC_CAPAB, V18, 0);
203 break;
205 default:
206 error_setg(errp, "Unsupported spec version: %u", s->sd_spec_version);
208 if (msk) {
209 qemu_log_mask(LOG_UNIMP,
210 "SDHCI: unknown CAPAB mask: 0x%016" PRIx64 "\n", msk);
214 static uint8_t sdhci_slotint(SDHCIState *s)
216 return (s->norintsts & s->norintsigen) || (s->errintsts & s->errintsigen) ||
217 ((s->norintsts & SDHC_NIS_INSERT) && (s->wakcon & SDHC_WKUP_ON_INS)) ||
218 ((s->norintsts & SDHC_NIS_REMOVE) && (s->wakcon & SDHC_WKUP_ON_RMV));
221 /* Return true if IRQ was pending and delivered */
222 static bool sdhci_update_irq(SDHCIState *s)
224 bool pending = sdhci_slotint(s);
226 qemu_set_irq(s->irq, pending);
228 return pending;
231 static void sdhci_raise_insertion_irq(void *opaque)
233 SDHCIState *s = (SDHCIState *)opaque;
235 if (s->norintsts & SDHC_NIS_REMOVE) {
236 timer_mod(s->insert_timer,
237 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY);
238 } else {
239 s->prnsts = 0x1ff0000;
240 if (s->norintstsen & SDHC_NISEN_INSERT) {
241 s->norintsts |= SDHC_NIS_INSERT;
243 sdhci_update_irq(s);
247 static void sdhci_set_inserted(DeviceState *dev, bool level)
249 SDHCIState *s = (SDHCIState *)dev;
251 trace_sdhci_set_inserted(level ? "insert" : "eject");
252 if ((s->norintsts & SDHC_NIS_REMOVE) && level) {
253 /* Give target some time to notice card ejection */
254 timer_mod(s->insert_timer,
255 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY);
256 } else {
257 if (level) {
258 s->prnsts = 0x1ff0000;
259 if (s->norintstsen & SDHC_NISEN_INSERT) {
260 s->norintsts |= SDHC_NIS_INSERT;
262 } else {
263 s->prnsts = 0x1fa0000;
264 s->pwrcon &= ~SDHC_POWER_ON;
265 s->clkcon &= ~SDHC_CLOCK_SDCLK_EN;
266 if (s->norintstsen & SDHC_NISEN_REMOVE) {
267 s->norintsts |= SDHC_NIS_REMOVE;
270 sdhci_update_irq(s);
274 static void sdhci_set_readonly(DeviceState *dev, bool level)
276 SDHCIState *s = (SDHCIState *)dev;
278 if (level) {
279 s->prnsts &= ~SDHC_WRITE_PROTECT;
280 } else {
281 /* Write enabled */
282 s->prnsts |= SDHC_WRITE_PROTECT;
286 static void sdhci_reset(SDHCIState *s)
288 DeviceState *dev = DEVICE(s);
290 timer_del(s->insert_timer);
291 timer_del(s->transfer_timer);
293 /* Set all registers to 0. Capabilities/Version registers are not cleared
294 * and assumed to always preserve their value, given to them during
295 * initialization */
296 memset(&s->sdmasysad, 0, (uintptr_t)&s->capareg - (uintptr_t)&s->sdmasysad);
298 /* Reset other state based on current card insertion/readonly status */
299 sdhci_set_inserted(dev, sdbus_get_inserted(&s->sdbus));
300 sdhci_set_readonly(dev, sdbus_get_readonly(&s->sdbus));
302 s->data_count = 0;
303 s->stopped_state = sdhc_not_stopped;
304 s->pending_insert_state = false;
307 static void sdhci_poweron_reset(DeviceState *dev)
309 /* QOM (ie power-on) reset. This is identical to reset
310 * commanded via device register apart from handling of the
311 * 'pending insert on powerup' quirk.
313 SDHCIState *s = (SDHCIState *)dev;
315 sdhci_reset(s);
317 if (s->pending_insert_quirk) {
318 s->pending_insert_state = true;
322 static void sdhci_data_transfer(void *opaque);
324 static void sdhci_send_command(SDHCIState *s)
326 SDRequest request;
327 uint8_t response[16];
328 int rlen;
329 bool timeout = false;
331 s->errintsts = 0;
332 s->acmd12errsts = 0;
333 request.cmd = s->cmdreg >> 8;
334 request.arg = s->argument;
336 trace_sdhci_send_command(request.cmd, request.arg);
337 rlen = sdbus_do_command(&s->sdbus, &request, response);
339 if (s->cmdreg & SDHC_CMD_RESPONSE) {
340 if (rlen == 4) {
341 s->rspreg[0] = ldl_be_p(response);
342 s->rspreg[1] = s->rspreg[2] = s->rspreg[3] = 0;
343 trace_sdhci_response4(s->rspreg[0]);
344 } else if (rlen == 16) {
345 s->rspreg[0] = ldl_be_p(&response[11]);
346 s->rspreg[1] = ldl_be_p(&response[7]);
347 s->rspreg[2] = ldl_be_p(&response[3]);
348 s->rspreg[3] = (response[0] << 16) | (response[1] << 8) |
349 response[2];
350 trace_sdhci_response16(s->rspreg[3], s->rspreg[2],
351 s->rspreg[1], s->rspreg[0]);
352 } else {
353 timeout = true;
354 trace_sdhci_error("timeout waiting for command response");
355 if (s->errintstsen & SDHC_EISEN_CMDTIMEOUT) {
356 s->errintsts |= SDHC_EIS_CMDTIMEOUT;
357 s->norintsts |= SDHC_NIS_ERR;
361 if (!(s->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
362 (s->norintstsen & SDHC_NISEN_TRSCMP) &&
363 (s->cmdreg & SDHC_CMD_RESPONSE) == SDHC_CMD_RSP_WITH_BUSY) {
364 s->norintsts |= SDHC_NIS_TRSCMP;
368 if (s->norintstsen & SDHC_NISEN_CMDCMP) {
369 s->norintsts |= SDHC_NIS_CMDCMP;
372 sdhci_update_irq(s);
374 if (!timeout && s->blksize && (s->cmdreg & SDHC_CMD_DATA_PRESENT)) {
375 s->data_count = 0;
376 sdhci_data_transfer(s);
380 static void sdhci_end_transfer(SDHCIState *s)
382 /* Automatically send CMD12 to stop transfer if AutoCMD12 enabled */
383 if ((s->trnmod & SDHC_TRNS_ACMD12) != 0) {
384 SDRequest request;
385 uint8_t response[16];
387 request.cmd = 0x0C;
388 request.arg = 0;
389 trace_sdhci_end_transfer(request.cmd, request.arg);
390 sdbus_do_command(&s->sdbus, &request, response);
391 /* Auto CMD12 response goes to the upper Response register */
392 s->rspreg[3] = ldl_be_p(response);
395 s->prnsts &= ~(SDHC_DOING_READ | SDHC_DOING_WRITE |
396 SDHC_DAT_LINE_ACTIVE | SDHC_DATA_INHIBIT |
397 SDHC_SPACE_AVAILABLE | SDHC_DATA_AVAILABLE);
399 if (s->norintstsen & SDHC_NISEN_TRSCMP) {
400 s->norintsts |= SDHC_NIS_TRSCMP;
403 sdhci_update_irq(s);
/*
 * Programmed i/o data transfer
 */

#define BLOCK_SIZE_MASK (4 * KiB - 1)
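
/*
 * Bits [11:0] of the Block Size register hold the transfer block size;
 * bits [14:12] select the host SDMA buffer boundary.
 */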
411 /* Fill host controller's read buffer with BLKSIZE bytes of data from card */
412 static void sdhci_read_block_from_card(SDHCIState *s)
414 const uint16_t blk_size = s->blksize & BLOCK_SIZE_MASK;
416 if ((s->trnmod & SDHC_TRNS_MULTI) &&
417 (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0)) {
418 return;
421 if (!FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, EXECUTE_TUNING)) {
422 /* Device is not in tuning */
423 sdbus_read_data(&s->sdbus, s->fifo_buffer, blk_size);
426 if (FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, EXECUTE_TUNING)) {
427 /* Device is in tuning */
428 s->hostctl2 &= ~R_SDHC_HOSTCTL2_EXECUTE_TUNING_MASK;
429 s->hostctl2 |= R_SDHC_HOSTCTL2_SAMPLING_CLKSEL_MASK;
430 s->prnsts &= ~(SDHC_DAT_LINE_ACTIVE | SDHC_DOING_READ |
431 SDHC_DATA_INHIBIT);
432 goto read_done;
435 /* New data now available for READ through Buffer Port Register */
436 s->prnsts |= SDHC_DATA_AVAILABLE;
437 if (s->norintstsen & SDHC_NISEN_RBUFRDY) {
438 s->norintsts |= SDHC_NIS_RBUFRDY;
441 /* Clear DAT line active status if that was the last block */
442 if ((s->trnmod & SDHC_TRNS_MULTI) == 0 ||
443 ((s->trnmod & SDHC_TRNS_MULTI) && s->blkcnt == 1)) {
444 s->prnsts &= ~SDHC_DAT_LINE_ACTIVE;
447 /* If stop at block gap request was set and it's not the last block of
448 * data - generate Block Event interrupt */
449 if (s->stopped_state == sdhc_gap_read && (s->trnmod & SDHC_TRNS_MULTI) &&
450 s->blkcnt != 1) {
451 s->prnsts &= ~SDHC_DAT_LINE_ACTIVE;
452 if (s->norintstsen & SDHC_EISEN_BLKGAP) {
453 s->norintsts |= SDHC_EIS_BLKGAP;
457 read_done:
458 sdhci_update_irq(s);
/* Read @size bytes of data from host controller @s BUFFER DATA PORT register */
462 static uint32_t sdhci_read_dataport(SDHCIState *s, unsigned size)
464 uint32_t value = 0;
465 int i;
    /* first check that valid data exists in the host controller input buffer */
468 if ((s->prnsts & SDHC_DATA_AVAILABLE) == 0) {
469 trace_sdhci_error("read from empty buffer");
470 return 0;
473 for (i = 0; i < size; i++) {
474 value |= s->fifo_buffer[s->data_count] << i * 8;
475 s->data_count++;
476 /* check if we've read all valid data (blksize bytes) from buffer */
477 if ((s->data_count) >= (s->blksize & BLOCK_SIZE_MASK)) {
478 trace_sdhci_read_dataport(s->data_count);
479 s->prnsts &= ~SDHC_DATA_AVAILABLE; /* no more data in a buffer */
480 s->data_count = 0; /* next buff read must start at position [0] */
482 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
483 s->blkcnt--;
486 /* if that was the last block of data */
487 if ((s->trnmod & SDHC_TRNS_MULTI) == 0 ||
488 ((s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0)) ||
489 /* stop at gap request */
490 (s->stopped_state == sdhc_gap_read &&
491 !(s->prnsts & SDHC_DAT_LINE_ACTIVE))) {
492 sdhci_end_transfer(s);
493 } else { /* if there are more data, read next block from card */
494 sdhci_read_block_from_card(s);
496 break;
500 return value;
503 /* Write data from host controller FIFO to card */
504 static void sdhci_write_block_to_card(SDHCIState *s)
506 if (s->prnsts & SDHC_SPACE_AVAILABLE) {
507 if (s->norintstsen & SDHC_NISEN_WBUFRDY) {
508 s->norintsts |= SDHC_NIS_WBUFRDY;
510 sdhci_update_irq(s);
511 return;
514 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
515 if (s->blkcnt == 0) {
516 return;
517 } else {
518 s->blkcnt--;
522 sdbus_write_data(&s->sdbus, s->fifo_buffer, s->blksize & BLOCK_SIZE_MASK);
    /* Next data can be written through the BUFFER DATA PORT register */
525 s->prnsts |= SDHC_SPACE_AVAILABLE;
527 /* Finish transfer if that was the last block of data */
528 if ((s->trnmod & SDHC_TRNS_MULTI) == 0 ||
529 ((s->trnmod & SDHC_TRNS_MULTI) &&
530 (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0))) {
531 sdhci_end_transfer(s);
532 } else if (s->norintstsen & SDHC_NISEN_WBUFRDY) {
533 s->norintsts |= SDHC_NIS_WBUFRDY;
536 /* Generate Block Gap Event if requested and if not the last block */
537 if (s->stopped_state == sdhc_gap_write && (s->trnmod & SDHC_TRNS_MULTI) &&
538 s->blkcnt > 0) {
539 s->prnsts &= ~SDHC_DOING_WRITE;
540 if (s->norintstsen & SDHC_EISEN_BLKGAP) {
541 s->norintsts |= SDHC_EIS_BLKGAP;
543 sdhci_end_transfer(s);
546 sdhci_update_irq(s);
549 /* Write @size bytes of @value data to host controller @s Buffer Data Port
550 * register */
551 static void sdhci_write_dataport(SDHCIState *s, uint32_t value, unsigned size)
553 unsigned i;
555 /* Check that there is free space left in a buffer */
556 if (!(s->prnsts & SDHC_SPACE_AVAILABLE)) {
557 trace_sdhci_error("Can't write to data buffer: buffer full");
558 return;
561 for (i = 0; i < size; i++) {
562 s->fifo_buffer[s->data_count] = value & 0xFF;
563 s->data_count++;
564 value >>= 8;
565 if (s->data_count >= (s->blksize & BLOCK_SIZE_MASK)) {
566 trace_sdhci_write_dataport(s->data_count);
567 s->data_count = 0;
568 s->prnsts &= ~SDHC_SPACE_AVAILABLE;
569 if (s->prnsts & SDHC_DOING_WRITE) {
570 sdhci_write_block_to_card(s);
/*
 * Single DMA data transfer
 */
580 /* Multi block SDMA transfer */
581 static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s)
583 bool page_aligned = false;
584 unsigned int begin;
585 const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK;
586 uint32_t boundary_chk = 1 << (((s->blksize & ~BLOCK_SIZE_MASK) >> 12) + 12);
587 uint32_t boundary_count = boundary_chk - (s->sdmasysad % boundary_chk);
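    /*
     * boundary_chk is the SDMA buffer boundary size (4 KiB << boundary
     * field of BLKSIZE); boundary_count is how many bytes remain before
     * the next boundary, where the transfer pauses and a DMA interrupt
     * is raised.
     */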
589 if (!(s->trnmod & SDHC_TRNS_BLK_CNT_EN) || !s->blkcnt) {
590 qemu_log_mask(LOG_UNIMP, "infinite transfer is not supported\n");
591 return;
594 /* XXX: Some sd/mmc drivers (for example, u-boot-slp) do not account for
595 * possible stop at page boundary if initial address is not page aligned,
596 * allow them to work properly */
597 if ((s->sdmasysad % boundary_chk) == 0) {
598 page_aligned = true;
601 s->prnsts |= SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE;
602 if (s->trnmod & SDHC_TRNS_READ) {
603 s->prnsts |= SDHC_DOING_READ;
604 while (s->blkcnt) {
605 if (s->data_count == 0) {
606 sdbus_read_data(&s->sdbus, s->fifo_buffer, block_size);
608 begin = s->data_count;
609 if (((boundary_count + begin) < block_size) && page_aligned) {
610 s->data_count = boundary_count + begin;
611 boundary_count = 0;
612 } else {
613 s->data_count = block_size;
614 boundary_count -= block_size - begin;
615 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
616 s->blkcnt--;
619 dma_memory_write(s->dma_as, s->sdmasysad,
620 &s->fifo_buffer[begin], s->data_count - begin);
621 s->sdmasysad += s->data_count - begin;
622 if (s->data_count == block_size) {
623 s->data_count = 0;
625 if (page_aligned && boundary_count == 0) {
626 break;
629 } else {
630 s->prnsts |= SDHC_DOING_WRITE;
631 while (s->blkcnt) {
632 begin = s->data_count;
633 if (((boundary_count + begin) < block_size) && page_aligned) {
634 s->data_count = boundary_count + begin;
635 boundary_count = 0;
636 } else {
637 s->data_count = block_size;
638 boundary_count -= block_size - begin;
640 dma_memory_read(s->dma_as, s->sdmasysad,
641 &s->fifo_buffer[begin], s->data_count - begin);
642 s->sdmasysad += s->data_count - begin;
643 if (s->data_count == block_size) {
644 sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size);
645 s->data_count = 0;
646 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
647 s->blkcnt--;
650 if (page_aligned && boundary_count == 0) {
651 break;
656 if (s->blkcnt == 0) {
657 sdhci_end_transfer(s);
658 } else {
659 if (s->norintstsen & SDHC_NISEN_DMA) {
660 s->norintsts |= SDHC_NIS_DMA;
662 sdhci_update_irq(s);
666 /* single block SDMA transfer */
667 static void sdhci_sdma_transfer_single_block(SDHCIState *s)
669 uint32_t datacnt = s->blksize & BLOCK_SIZE_MASK;
671 if (s->trnmod & SDHC_TRNS_READ) {
672 sdbus_read_data(&s->sdbus, s->fifo_buffer, datacnt);
673 dma_memory_write(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt);
674 } else {
675 dma_memory_read(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt);
676 sdbus_write_data(&s->sdbus, s->fifo_buffer, datacnt);
678 s->blkcnt--;
680 sdhci_end_transfer(s);
683 typedef struct ADMADescr {
684 hwaddr addr;
685 uint16_t length;
686 uint8_t attr;
687 uint8_t incr;
688 } ADMADescr;
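
/*
 * Fetch and decode the ADMA descriptor at s->admasysaddr into @dscr,
 * according to the DMA mode selected in the Host Control register
 * (ADMA1 32-bit, ADMA2 32-bit or ADMA2 64-bit).
 */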
690 static void get_adma_description(SDHCIState *s, ADMADescr *dscr)
692 uint32_t adma1 = 0;
693 uint64_t adma2 = 0;
694 hwaddr entry_addr = (hwaddr)s->admasysaddr;
695 switch (SDHC_DMA_TYPE(s->hostctl1)) {
696 case SDHC_CTRL_ADMA2_32:
697 dma_memory_read(s->dma_as, entry_addr, &adma2, sizeof(adma2));
698 adma2 = le64_to_cpu(adma2);
        /*
         * The spec does not specify endianness of descriptor table.
         * We currently assume that it is LE.
         */
702 dscr->addr = (hwaddr)extract64(adma2, 32, 32) & ~0x3ull;
703 dscr->length = (uint16_t)extract64(adma2, 16, 16);
704 dscr->attr = (uint8_t)extract64(adma2, 0, 7);
705 dscr->incr = 8;
706 break;
707 case SDHC_CTRL_ADMA1_32:
708 dma_memory_read(s->dma_as, entry_addr, &adma1, sizeof(adma1));
709 adma1 = le32_to_cpu(adma1);
710 dscr->addr = (hwaddr)(adma1 & 0xFFFFF000);
711 dscr->attr = (uint8_t)extract32(adma1, 0, 7);
712 dscr->incr = 4;
713 if ((dscr->attr & SDHC_ADMA_ATTR_ACT_MASK) == SDHC_ADMA_ATTR_SET_LEN) {
714 dscr->length = (uint16_t)extract32(adma1, 12, 16);
715 } else {
716 dscr->length = 4 * KiB;
718 break;
719 case SDHC_CTRL_ADMA2_64:
720 dma_memory_read(s->dma_as, entry_addr, &dscr->attr, 1);
721 dma_memory_read(s->dma_as, entry_addr + 2, &dscr->length, 2);
722 dscr->length = le16_to_cpu(dscr->length);
723 dma_memory_read(s->dma_as, entry_addr + 4, &dscr->addr, 8);
724 dscr->addr = le64_to_cpu(dscr->addr);
725 dscr->attr &= (uint8_t) ~0xC0;
726 dscr->incr = 12;
727 break;
731 /* Advanced DMA data transfer */
733 static void sdhci_do_adma(SDHCIState *s)
735 unsigned int begin, length;
736 const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK;
737 ADMADescr dscr = {};
738 int i;
740 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN && !s->blkcnt) {
741 /* Stop Multiple Transfer */
742 sdhci_end_transfer(s);
743 return;
746 for (i = 0; i < SDHC_ADMA_DESCS_PER_DELAY; ++i) {
747 s->admaerr &= ~SDHC_ADMAERR_LENGTH_MISMATCH;
749 get_adma_description(s, &dscr);
750 trace_sdhci_adma_loop(dscr.addr, dscr.length, dscr.attr);
752 if ((dscr.attr & SDHC_ADMA_ATTR_VALID) == 0) {
753 /* Indicate that error occurred in ST_FDS state */
754 s->admaerr &= ~SDHC_ADMAERR_STATE_MASK;
755 s->admaerr |= SDHC_ADMAERR_STATE_ST_FDS;
757 /* Generate ADMA error interrupt */
758 if (s->errintstsen & SDHC_EISEN_ADMAERR) {
759 s->errintsts |= SDHC_EIS_ADMAERR;
760 s->norintsts |= SDHC_NIS_ERR;
763 sdhci_update_irq(s);
764 return;
767 length = dscr.length ? dscr.length : 64 * KiB;
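
        /*
         * A zero length field means a 64 KiB transfer (handled above).
         * The ACT bits select a data transfer or a link to another
         * descriptor table; anything else is skipped as a no-op.
         */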
769 switch (dscr.attr & SDHC_ADMA_ATTR_ACT_MASK) {
770 case SDHC_ADMA_ATTR_ACT_TRAN: /* data transfer */
771 s->prnsts |= SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE;
772 if (s->trnmod & SDHC_TRNS_READ) {
773 s->prnsts |= SDHC_DOING_READ;
774 while (length) {
775 if (s->data_count == 0) {
776 sdbus_read_data(&s->sdbus, s->fifo_buffer, block_size);
778 begin = s->data_count;
779 if ((length + begin) < block_size) {
780 s->data_count = length + begin;
781 length = 0;
782 } else {
783 s->data_count = block_size;
784 length -= block_size - begin;
786 dma_memory_write(s->dma_as, dscr.addr,
787 &s->fifo_buffer[begin],
788 s->data_count - begin);
789 dscr.addr += s->data_count - begin;
790 if (s->data_count == block_size) {
791 s->data_count = 0;
792 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
793 s->blkcnt--;
794 if (s->blkcnt == 0) {
795 break;
800 } else {
801 s->prnsts |= SDHC_DOING_WRITE;
802 while (length) {
803 begin = s->data_count;
804 if ((length + begin) < block_size) {
805 s->data_count = length + begin;
806 length = 0;
807 } else {
808 s->data_count = block_size;
809 length -= block_size - begin;
811 dma_memory_read(s->dma_as, dscr.addr,
812 &s->fifo_buffer[begin],
813 s->data_count - begin);
814 dscr.addr += s->data_count - begin;
815 if (s->data_count == block_size) {
816 sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size);
817 s->data_count = 0;
818 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
819 s->blkcnt--;
820 if (s->blkcnt == 0) {
821 break;
827 s->admasysaddr += dscr.incr;
828 break;
829 case SDHC_ADMA_ATTR_ACT_LINK: /* link to next descriptor table */
830 s->admasysaddr = dscr.addr;
831 trace_sdhci_adma("link", s->admasysaddr);
832 break;
833 default:
834 s->admasysaddr += dscr.incr;
835 break;
838 if (dscr.attr & SDHC_ADMA_ATTR_INT) {
839 trace_sdhci_adma("interrupt", s->admasysaddr);
840 if (s->norintstsen & SDHC_NISEN_DMA) {
841 s->norintsts |= SDHC_NIS_DMA;
844 if (sdhci_update_irq(s) && !(dscr.attr & SDHC_ADMA_ATTR_END)) {
845 /* IRQ delivered, reschedule current transfer */
846 break;
850 /* ADMA transfer terminates if blkcnt == 0 or by END attribute */
851 if (((s->trnmod & SDHC_TRNS_BLK_CNT_EN) &&
852 (s->blkcnt == 0)) || (dscr.attr & SDHC_ADMA_ATTR_END)) {
853 trace_sdhci_adma_transfer_completed();
854 if (length || ((dscr.attr & SDHC_ADMA_ATTR_END) &&
855 (s->trnmod & SDHC_TRNS_BLK_CNT_EN) &&
856 s->blkcnt != 0)) {
857 trace_sdhci_error("SD/MMC host ADMA length mismatch");
858 s->admaerr |= SDHC_ADMAERR_LENGTH_MISMATCH |
859 SDHC_ADMAERR_STATE_ST_TFR;
860 if (s->errintstsen & SDHC_EISEN_ADMAERR) {
861 trace_sdhci_error("Set ADMA error flag");
862 s->errintsts |= SDHC_EIS_ADMAERR;
863 s->norintsts |= SDHC_NIS_ERR;
866 sdhci_update_irq(s);
868 sdhci_end_transfer(s);
869 return;
874 /* we have unfinished business - reschedule to continue ADMA */
875 timer_mod(s->transfer_timer,
876 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_TRANSFER_DELAY);
879 /* Perform data transfer according to controller configuration */
881 static void sdhci_data_transfer(void *opaque)
883 SDHCIState *s = (SDHCIState *)opaque;
885 if (s->trnmod & SDHC_TRNS_DMA) {
886 switch (SDHC_DMA_TYPE(s->hostctl1)) {
887 case SDHC_CTRL_SDMA:
888 if ((s->blkcnt == 1) || !(s->trnmod & SDHC_TRNS_MULTI)) {
889 sdhci_sdma_transfer_single_block(s);
890 } else {
891 sdhci_sdma_transfer_multi_blocks(s);
894 break;
895 case SDHC_CTRL_ADMA1_32:
896 if (!(s->capareg & R_SDHC_CAPAB_ADMA1_MASK)) {
897 trace_sdhci_error("ADMA1 not supported");
898 break;
901 sdhci_do_adma(s);
902 break;
903 case SDHC_CTRL_ADMA2_32:
904 if (!(s->capareg & R_SDHC_CAPAB_ADMA2_MASK)) {
905 trace_sdhci_error("ADMA2 not supported");
906 break;
909 sdhci_do_adma(s);
910 break;
911 case SDHC_CTRL_ADMA2_64:
912 if (!(s->capareg & R_SDHC_CAPAB_ADMA2_MASK) ||
913 !(s->capareg & R_SDHC_CAPAB_BUS64BIT_MASK)) {
914 trace_sdhci_error("64 bit ADMA not supported");
915 break;
918 sdhci_do_adma(s);
919 break;
920 default:
921 trace_sdhci_error("Unsupported DMA type");
922 break;
924 } else {
925 if ((s->trnmod & SDHC_TRNS_READ) && sdbus_data_ready(&s->sdbus)) {
926 s->prnsts |= SDHC_DOING_READ | SDHC_DATA_INHIBIT |
927 SDHC_DAT_LINE_ACTIVE;
928 sdhci_read_block_from_card(s);
929 } else {
930 s->prnsts |= SDHC_DOING_WRITE | SDHC_DAT_LINE_ACTIVE |
931 SDHC_SPACE_AVAILABLE | SDHC_DATA_INHIBIT;
932 sdhci_write_block_to_card(s);
937 static bool sdhci_can_issue_command(SDHCIState *s)
939 if (!SDHC_CLOCK_IS_ON(s->clkcon) ||
940 (((s->prnsts & SDHC_DATA_INHIBIT) || s->stopped_state) &&
941 ((s->cmdreg & SDHC_CMD_DATA_PRESENT) ||
942 ((s->cmdreg & SDHC_CMD_RESPONSE) == SDHC_CMD_RSP_WITH_BUSY &&
943 !(SDHC_COMMAND_TYPE(s->cmdreg) == SDHC_CMD_ABORT))))) {
944 return false;
947 return true;
/* The Buffer Data Port register must be accessed in a sequential and
 * continuous manner */
static inline bool
sdhci_buff_access_is_sequential(SDHCIState *s, unsigned byte_num)
{
    if ((s->data_count & 0x3) != byte_num) {
        trace_sdhci_error("Non-sequential access to Buffer Data Port register "
                          "is prohibited\n");
        return false;
    }
    return true;
}
963 static void sdhci_resume_pending_transfer(SDHCIState *s)
965 timer_del(s->transfer_timer);
966 sdhci_data_transfer(s);
969 static uint64_t sdhci_read(void *opaque, hwaddr offset, unsigned size)
971 SDHCIState *s = (SDHCIState *)opaque;
972 uint32_t ret = 0;
974 if (timer_pending(s->transfer_timer)) {
975 sdhci_resume_pending_transfer(s);
978 switch (offset & ~0x3) {
979 case SDHC_SYSAD:
980 ret = s->sdmasysad;
981 break;
982 case SDHC_BLKSIZE:
983 ret = s->blksize | (s->blkcnt << 16);
984 break;
985 case SDHC_ARGUMENT:
986 ret = s->argument;
987 break;
988 case SDHC_TRNMOD:
989 ret = s->trnmod | (s->cmdreg << 16);
990 break;
991 case SDHC_RSPREG0 ... SDHC_RSPREG3:
992 ret = s->rspreg[((offset & ~0x3) - SDHC_RSPREG0) >> 2];
993 break;
994 case SDHC_BDATA:
995 if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) {
996 ret = sdhci_read_dataport(s, size);
997 trace_sdhci_access("rd", size << 3, offset, "->", ret, ret);
998 return ret;
1000 break;
1001 case SDHC_PRNSTS:
1002 ret = s->prnsts;
1003 ret = FIELD_DP32(ret, SDHC_PRNSTS, DAT_LVL,
1004 sdbus_get_dat_lines(&s->sdbus));
1005 ret = FIELD_DP32(ret, SDHC_PRNSTS, CMD_LVL,
1006 sdbus_get_cmd_line(&s->sdbus));
1007 break;
1008 case SDHC_HOSTCTL:
1009 ret = s->hostctl1 | (s->pwrcon << 8) | (s->blkgap << 16) |
1010 (s->wakcon << 24);
1011 break;
1012 case SDHC_CLKCON:
1013 ret = s->clkcon | (s->timeoutcon << 16);
1014 break;
1015 case SDHC_NORINTSTS:
1016 ret = s->norintsts | (s->errintsts << 16);
1017 break;
1018 case SDHC_NORINTSTSEN:
1019 ret = s->norintstsen | (s->errintstsen << 16);
1020 break;
1021 case SDHC_NORINTSIGEN:
1022 ret = s->norintsigen | (s->errintsigen << 16);
1023 break;
1024 case SDHC_ACMD12ERRSTS:
1025 ret = s->acmd12errsts | (s->hostctl2 << 16);
1026 break;
1027 case SDHC_CAPAB:
1028 ret = (uint32_t)s->capareg;
1029 break;
1030 case SDHC_CAPAB + 4:
1031 ret = (uint32_t)(s->capareg >> 32);
1032 break;
1033 case SDHC_MAXCURR:
1034 ret = (uint32_t)s->maxcurr;
1035 break;
1036 case SDHC_MAXCURR + 4:
1037 ret = (uint32_t)(s->maxcurr >> 32);
1038 break;
1039 case SDHC_ADMAERR:
1040 ret = s->admaerr;
1041 break;
1042 case SDHC_ADMASYSADDR:
1043 ret = (uint32_t)s->admasysaddr;
1044 break;
1045 case SDHC_ADMASYSADDR + 4:
1046 ret = (uint32_t)(s->admasysaddr >> 32);
1047 break;
1048 case SDHC_SLOT_INT_STATUS:
1049 ret = (s->version << 16) | sdhci_slotint(s);
1050 break;
1051 default:
1052 qemu_log_mask(LOG_UNIMP, "SDHC rd_%ub @0x%02" HWADDR_PRIx " "
1053 "not implemented\n", size, offset);
1054 break;
1057 ret >>= (offset & 0x3) * 8;
1058 ret &= (1ULL << (size * 8)) - 1;
1059 trace_sdhci_access("rd", size << 3, offset, "->", ret, ret);
1060 return ret;
1063 static inline void sdhci_blkgap_write(SDHCIState *s, uint8_t value)
1065 if ((value & SDHC_STOP_AT_GAP_REQ) && (s->blkgap & SDHC_STOP_AT_GAP_REQ)) {
1066 return;
1068 s->blkgap = value & SDHC_STOP_AT_GAP_REQ;
1070 if ((value & SDHC_CONTINUE_REQ) && s->stopped_state &&
1071 (s->blkgap & SDHC_STOP_AT_GAP_REQ) == 0) {
1072 if (s->stopped_state == sdhc_gap_read) {
1073 s->prnsts |= SDHC_DAT_LINE_ACTIVE | SDHC_DOING_READ;
1074 sdhci_read_block_from_card(s);
1075 } else {
1076 s->prnsts |= SDHC_DAT_LINE_ACTIVE | SDHC_DOING_WRITE;
1077 sdhci_write_block_to_card(s);
1079 s->stopped_state = sdhc_not_stopped;
1080 } else if (!s->stopped_state && (value & SDHC_STOP_AT_GAP_REQ)) {
1081 if (s->prnsts & SDHC_DOING_READ) {
1082 s->stopped_state = sdhc_gap_read;
1083 } else if (s->prnsts & SDHC_DOING_WRITE) {
1084 s->stopped_state = sdhc_gap_write;
1089 static inline void sdhci_reset_write(SDHCIState *s, uint8_t value)
1091 switch (value) {
1092 case SDHC_RESET_ALL:
1093 sdhci_reset(s);
1094 break;
1095 case SDHC_RESET_CMD:
1096 s->prnsts &= ~SDHC_CMD_INHIBIT;
1097 s->norintsts &= ~SDHC_NIS_CMDCMP;
1098 break;
1099 case SDHC_RESET_DATA:
1100 s->data_count = 0;
1101 s->prnsts &= ~(SDHC_SPACE_AVAILABLE | SDHC_DATA_AVAILABLE |
1102 SDHC_DOING_READ | SDHC_DOING_WRITE |
1103 SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE);
1104 s->blkgap &= ~(SDHC_STOP_AT_GAP_REQ | SDHC_CONTINUE_REQ);
1105 s->stopped_state = sdhc_not_stopped;
1106 s->norintsts &= ~(SDHC_NIS_WBUFRDY | SDHC_NIS_RBUFRDY |
1107 SDHC_NIS_DMA | SDHC_NIS_TRSCMP | SDHC_NIS_BLKGAP);
1108 break;
1112 static void
1113 sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size)
1115 SDHCIState *s = (SDHCIState *)opaque;
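    /*
     * Registers are modelled as 32-bit words: widen a narrow write by
     * shifting the data into position and build a mask that preserves
     * the bytes that are not being written.
     */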
1116 unsigned shift = 8 * (offset & 0x3);
1117 uint32_t mask = ~(((1ULL << (size * 8)) - 1) << shift);
1118 uint32_t value = val;
1119 value <<= shift;
1121 if (timer_pending(s->transfer_timer)) {
1122 sdhci_resume_pending_transfer(s);
1125 switch (offset & ~0x3) {
1126 case SDHC_SYSAD:
1127 if (!TRANSFERRING_DATA(s->prnsts)) {
1128 s->sdmasysad = (s->sdmasysad & mask) | value;
1129 MASKED_WRITE(s->sdmasysad, mask, value);
1130 /* Writing to last byte of sdmasysad might trigger transfer */
1131 if (!(mask & 0xFF000000) && s->blkcnt && s->blksize &&
1132 SDHC_DMA_TYPE(s->hostctl1) == SDHC_CTRL_SDMA) {
1133 if (s->trnmod & SDHC_TRNS_MULTI) {
1134 sdhci_sdma_transfer_multi_blocks(s);
1135 } else {
1136 sdhci_sdma_transfer_single_block(s);
1140 break;
1141 case SDHC_BLKSIZE:
1142 if (!TRANSFERRING_DATA(s->prnsts)) {
1143 uint16_t blksize = s->blksize;
1145 MASKED_WRITE(s->blksize, mask, extract32(value, 0, 12));
1146 MASKED_WRITE(s->blkcnt, mask >> 16, value >> 16);
1148 /* Limit block size to the maximum buffer size */
1149 if (extract32(s->blksize, 0, 12) > s->buf_maxsz) {
1150 qemu_log_mask(LOG_GUEST_ERROR, "%s: Size 0x%x is larger than "
1151 "the maximum buffer 0x%x\n", __func__, s->blksize,
1152 s->buf_maxsz);
1154 s->blksize = deposit32(s->blksize, 0, 12, s->buf_maxsz);
1158 * If the block size is programmed to a different value from
1159 * the previous one, reset the data pointer of s->fifo_buffer[]
1160 * so that s->fifo_buffer[] can be filled in using the new block
1161 * size in the next transfer.
1163 if (blksize != s->blksize) {
1164 s->data_count = 0;
1168 break;
1169 case SDHC_ARGUMENT:
1170 MASKED_WRITE(s->argument, mask, value);
1171 break;
1172 case SDHC_TRNMOD:
1173 /* DMA can be enabled only if it is supported as indicated by
1174 * capabilities register */
1175 if (!(s->capareg & R_SDHC_CAPAB_SDMA_MASK)) {
1176 value &= ~SDHC_TRNS_DMA;
1178 MASKED_WRITE(s->trnmod, mask, value & SDHC_TRNMOD_MASK);
1179 MASKED_WRITE(s->cmdreg, mask >> 16, value >> 16);
1181 /* Writing to the upper byte of CMDREG triggers SD command generation */
1182 if ((mask & 0xFF000000) || !sdhci_can_issue_command(s)) {
1183 break;
1186 sdhci_send_command(s);
1187 break;
1188 case SDHC_BDATA:
1189 if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) {
1190 sdhci_write_dataport(s, value >> shift, size);
1192 break;
1193 case SDHC_HOSTCTL:
1194 if (!(mask & 0xFF0000)) {
1195 sdhci_blkgap_write(s, value >> 16);
1197 MASKED_WRITE(s->hostctl1, mask, value);
1198 MASKED_WRITE(s->pwrcon, mask >> 8, value >> 8);
1199 MASKED_WRITE(s->wakcon, mask >> 24, value >> 24);
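        /*
         * Power stays on only while a card is present and the voltage
         * selected in PWRCON bits [3:1] is one advertised in CAPAB
         * (3.3 V, 3.0 V or 1.8 V).
         */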
1200 if (!(s->prnsts & SDHC_CARD_PRESENT) || ((s->pwrcon >> 1) & 0x7) < 5 ||
1201 !(s->capareg & (1 << (31 - ((s->pwrcon >> 1) & 0x7))))) {
1202 s->pwrcon &= ~SDHC_POWER_ON;
1204 break;
1205 case SDHC_CLKCON:
1206 if (!(mask & 0xFF000000)) {
1207 sdhci_reset_write(s, value >> 24);
1209 MASKED_WRITE(s->clkcon, mask, value);
1210 MASKED_WRITE(s->timeoutcon, mask >> 16, value >> 16);
1211 if (s->clkcon & SDHC_CLOCK_INT_EN) {
1212 s->clkcon |= SDHC_CLOCK_INT_STABLE;
1213 } else {
1214 s->clkcon &= ~SDHC_CLOCK_INT_STABLE;
1216 break;
1217 case SDHC_NORINTSTS:
1218 if (s->norintstsen & SDHC_NISEN_CARDINT) {
1219 value &= ~SDHC_NIS_CARDINT;
1221 s->norintsts &= mask | ~value;
1222 s->errintsts &= (mask >> 16) | ~(value >> 16);
1223 if (s->errintsts) {
1224 s->norintsts |= SDHC_NIS_ERR;
1225 } else {
1226 s->norintsts &= ~SDHC_NIS_ERR;
1228 sdhci_update_irq(s);
1229 break;
1230 case SDHC_NORINTSTSEN:
1231 MASKED_WRITE(s->norintstsen, mask, value);
1232 MASKED_WRITE(s->errintstsen, mask >> 16, value >> 16);
1233 s->norintsts &= s->norintstsen;
1234 s->errintsts &= s->errintstsen;
1235 if (s->errintsts) {
1236 s->norintsts |= SDHC_NIS_ERR;
1237 } else {
1238 s->norintsts &= ~SDHC_NIS_ERR;
1240 /* Quirk for Raspberry Pi: pending card insert interrupt
1241 * appears when first enabled after power on */
1242 if ((s->norintstsen & SDHC_NISEN_INSERT) && s->pending_insert_state) {
1243 assert(s->pending_insert_quirk);
1244 s->norintsts |= SDHC_NIS_INSERT;
1245 s->pending_insert_state = false;
1247 sdhci_update_irq(s);
1248 break;
1249 case SDHC_NORINTSIGEN:
1250 MASKED_WRITE(s->norintsigen, mask, value);
1251 MASKED_WRITE(s->errintsigen, mask >> 16, value >> 16);
1252 sdhci_update_irq(s);
1253 break;
1254 case SDHC_ADMAERR:
1255 MASKED_WRITE(s->admaerr, mask, value);
1256 break;
1257 case SDHC_ADMASYSADDR:
1258 s->admasysaddr = (s->admasysaddr & (0xFFFFFFFF00000000ULL |
1259 (uint64_t)mask)) | (uint64_t)value;
1260 break;
1261 case SDHC_ADMASYSADDR + 4:
1262 s->admasysaddr = (s->admasysaddr & (0x00000000FFFFFFFFULL |
1263 ((uint64_t)mask << 32))) | ((uint64_t)value << 32);
1264 break;
1265 case SDHC_FEAER:
1266 s->acmd12errsts |= value;
1267 s->errintsts |= (value >> 16) & s->errintstsen;
1268 if (s->acmd12errsts) {
1269 s->errintsts |= SDHC_EIS_CMD12ERR;
1271 if (s->errintsts) {
1272 s->norintsts |= SDHC_NIS_ERR;
1274 sdhci_update_irq(s);
1275 break;
1276 case SDHC_ACMD12ERRSTS:
1277 MASKED_WRITE(s->acmd12errsts, mask, value & UINT16_MAX);
1278 if (s->uhs_mode >= UHS_I) {
1279 MASKED_WRITE(s->hostctl2, mask >> 16, value >> 16);
1281 if (FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, V18_ENA)) {
1282 sdbus_set_voltage(&s->sdbus, SD_VOLTAGE_1_8V);
1283 } else {
1284 sdbus_set_voltage(&s->sdbus, SD_VOLTAGE_3_3V);
1287 break;
1289 case SDHC_CAPAB:
1290 case SDHC_CAPAB + 4:
1291 case SDHC_MAXCURR:
1292 case SDHC_MAXCURR + 4:
1293 qemu_log_mask(LOG_GUEST_ERROR, "SDHC wr_%ub @0x%02" HWADDR_PRIx
1294 " <- 0x%08x read-only\n", size, offset, value >> shift);
1295 break;
1297 default:
1298 qemu_log_mask(LOG_UNIMP, "SDHC wr_%ub @0x%02" HWADDR_PRIx " <- 0x%08x "
1299 "not implemented\n", size, offset, value >> shift);
1300 break;
1302 trace_sdhci_access("wr", size << 3, offset, "<-",
1303 value >> shift, value >> shift);
1306 static const MemoryRegionOps sdhci_mmio_ops = {
1307 .read = sdhci_read,
1308 .write = sdhci_write,
1309 .valid = {
1310 .min_access_size = 1,
1311 .max_access_size = 4,
1312 .unaligned = false
1314 .endianness = DEVICE_LITTLE_ENDIAN,
1317 static void sdhci_init_readonly_registers(SDHCIState *s, Error **errp)
1319 ERRP_GUARD();
1321 switch (s->sd_spec_version) {
1322 case 2 ... 3:
1323 break;
1324 default:
1325 error_setg(errp, "Only Spec v2/v3 are supported");
1326 return;
1328 s->version = (SDHC_HCVER_VENDOR << 8) | (s->sd_spec_version - 1);
1330 sdhci_check_capareg(s, errp);
1331 if (*errp) {
1332 return;
1336 /* --- qdev common --- */
1338 void sdhci_initfn(SDHCIState *s)
1340 qbus_create_inplace(&s->sdbus, sizeof(s->sdbus),
1341 TYPE_SDHCI_BUS, DEVICE(s), "sd-bus");
1343 s->insert_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_raise_insertion_irq, s);
1344 s->transfer_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_data_transfer, s);
1346 s->io_ops = &sdhci_mmio_ops;
1349 void sdhci_uninitfn(SDHCIState *s)
1351 timer_free(s->insert_timer);
1352 timer_free(s->transfer_timer);
1354 g_free(s->fifo_buffer);
1355 s->fifo_buffer = NULL;
1358 void sdhci_common_realize(SDHCIState *s, Error **errp)
1360 ERRP_GUARD();
1362 sdhci_init_readonly_registers(s, errp);
1363 if (*errp) {
1364 return;
1366 s->buf_maxsz = sdhci_get_fifolen(s);
1367 s->fifo_buffer = g_malloc0(s->buf_maxsz);
1369 memory_region_init_io(&s->iomem, OBJECT(s), s->io_ops, s, "sdhci",
1370 SDHC_REGISTERS_MAP_SIZE);
1373 void sdhci_common_unrealize(SDHCIState *s)
1375 /* This function is expected to be called only once for each class:
1376 * - SysBus: via DeviceClass->unrealize(),
1377 * - PCI: via PCIDeviceClass->exit().
1378 * However to avoid double-free and/or use-after-free we still nullify
1379 * this variable (better safe than sorry!). */
1380 g_free(s->fifo_buffer);
1381 s->fifo_buffer = NULL;
1384 static bool sdhci_pending_insert_vmstate_needed(void *opaque)
1386 SDHCIState *s = opaque;
1388 return s->pending_insert_state;
1391 static const VMStateDescription sdhci_pending_insert_vmstate = {
1392 .name = "sdhci/pending-insert",
1393 .version_id = 1,
1394 .minimum_version_id = 1,
1395 .needed = sdhci_pending_insert_vmstate_needed,
1396 .fields = (VMStateField[]) {
1397 VMSTATE_BOOL(pending_insert_state, SDHCIState),
1398 VMSTATE_END_OF_LIST()
1402 const VMStateDescription sdhci_vmstate = {
1403 .name = "sdhci",
1404 .version_id = 1,
1405 .minimum_version_id = 1,
1406 .fields = (VMStateField[]) {
1407 VMSTATE_UINT32(sdmasysad, SDHCIState),
1408 VMSTATE_UINT16(blksize, SDHCIState),
1409 VMSTATE_UINT16(blkcnt, SDHCIState),
1410 VMSTATE_UINT32(argument, SDHCIState),
1411 VMSTATE_UINT16(trnmod, SDHCIState),
1412 VMSTATE_UINT16(cmdreg, SDHCIState),
1413 VMSTATE_UINT32_ARRAY(rspreg, SDHCIState, 4),
1414 VMSTATE_UINT32(prnsts, SDHCIState),
1415 VMSTATE_UINT8(hostctl1, SDHCIState),
1416 VMSTATE_UINT8(pwrcon, SDHCIState),
1417 VMSTATE_UINT8(blkgap, SDHCIState),
1418 VMSTATE_UINT8(wakcon, SDHCIState),
1419 VMSTATE_UINT16(clkcon, SDHCIState),
1420 VMSTATE_UINT8(timeoutcon, SDHCIState),
1421 VMSTATE_UINT8(admaerr, SDHCIState),
1422 VMSTATE_UINT16(norintsts, SDHCIState),
1423 VMSTATE_UINT16(errintsts, SDHCIState),
1424 VMSTATE_UINT16(norintstsen, SDHCIState),
1425 VMSTATE_UINT16(errintstsen, SDHCIState),
1426 VMSTATE_UINT16(norintsigen, SDHCIState),
1427 VMSTATE_UINT16(errintsigen, SDHCIState),
1428 VMSTATE_UINT16(acmd12errsts, SDHCIState),
1429 VMSTATE_UINT16(data_count, SDHCIState),
1430 VMSTATE_UINT64(admasysaddr, SDHCIState),
1431 VMSTATE_UINT8(stopped_state, SDHCIState),
1432 VMSTATE_VBUFFER_UINT32(fifo_buffer, SDHCIState, 1, NULL, buf_maxsz),
1433 VMSTATE_TIMER_PTR(insert_timer, SDHCIState),
1434 VMSTATE_TIMER_PTR(transfer_timer, SDHCIState),
1435 VMSTATE_END_OF_LIST()
1437 .subsections = (const VMStateDescription*[]) {
1438 &sdhci_pending_insert_vmstate,
1439 NULL
1443 void sdhci_common_class_init(ObjectClass *klass, void *data)
1445 DeviceClass *dc = DEVICE_CLASS(klass);
1447 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1448 dc->vmsd = &sdhci_vmstate;
1449 dc->reset = sdhci_poweron_reset;
1452 /* --- qdev SysBus --- */
1454 static Property sdhci_sysbus_properties[] = {
1455 DEFINE_SDHCI_COMMON_PROPERTIES(SDHCIState),
1456 DEFINE_PROP_BOOL("pending-insert-quirk", SDHCIState, pending_insert_quirk,
1457 false),
1458 DEFINE_PROP_LINK("dma", SDHCIState,
1459 dma_mr, TYPE_MEMORY_REGION, MemoryRegion *),
1460 DEFINE_PROP_END_OF_LIST(),
1463 static void sdhci_sysbus_init(Object *obj)
1465 SDHCIState *s = SYSBUS_SDHCI(obj);
1467 sdhci_initfn(s);
1470 static void sdhci_sysbus_finalize(Object *obj)
1472 SDHCIState *s = SYSBUS_SDHCI(obj);
1474 if (s->dma_mr) {
1475 object_unparent(OBJECT(s->dma_mr));
1478 sdhci_uninitfn(s);
1481 static void sdhci_sysbus_realize(DeviceState *dev, Error **errp)
1483 ERRP_GUARD();
1484 SDHCIState *s = SYSBUS_SDHCI(dev);
1485 SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
1487 sdhci_common_realize(s, errp);
1488 if (*errp) {
1489 return;
1492 if (s->dma_mr) {
1493 s->dma_as = &s->sysbus_dma_as;
1494 address_space_init(s->dma_as, s->dma_mr, "sdhci-dma");
1495 } else {
1496 /* use system_memory() if property "dma" not set */
1497 s->dma_as = &address_space_memory;
1500 sysbus_init_irq(sbd, &s->irq);
1502 sysbus_init_mmio(sbd, &s->iomem);
1505 static void sdhci_sysbus_unrealize(DeviceState *dev)
1507 SDHCIState *s = SYSBUS_SDHCI(dev);
1509 sdhci_common_unrealize(s);
1511 if (s->dma_mr) {
1512 address_space_destroy(s->dma_as);
1516 static void sdhci_sysbus_class_init(ObjectClass *klass, void *data)
1518 DeviceClass *dc = DEVICE_CLASS(klass);
1520 device_class_set_props(dc, sdhci_sysbus_properties);
1521 dc->realize = sdhci_sysbus_realize;
1522 dc->unrealize = sdhci_sysbus_unrealize;
1524 sdhci_common_class_init(klass, data);
1527 static const TypeInfo sdhci_sysbus_info = {
1528 .name = TYPE_SYSBUS_SDHCI,
1529 .parent = TYPE_SYS_BUS_DEVICE,
1530 .instance_size = sizeof(SDHCIState),
1531 .instance_init = sdhci_sysbus_init,
1532 .instance_finalize = sdhci_sysbus_finalize,
1533 .class_init = sdhci_sysbus_class_init,
1536 /* --- qdev bus master --- */
1538 static void sdhci_bus_class_init(ObjectClass *klass, void *data)
1540 SDBusClass *sbc = SD_BUS_CLASS(klass);
1542 sbc->set_inserted = sdhci_set_inserted;
1543 sbc->set_readonly = sdhci_set_readonly;
1546 static const TypeInfo sdhci_bus_info = {
1547 .name = TYPE_SDHCI_BUS,
1548 .parent = TYPE_SD_BUS,
1549 .instance_size = sizeof(SDBus),
1550 .class_init = sdhci_bus_class_init,
1553 /* --- qdev i.MX eSDHC --- */
1555 static uint64_t usdhc_read(void *opaque, hwaddr offset, unsigned size)
1557 SDHCIState *s = SYSBUS_SDHCI(opaque);
1558 uint32_t ret;
1559 uint16_t hostctl1;
1561 switch (offset) {
1562 default:
1563 return sdhci_read(opaque, offset, size);
1565 case SDHC_HOSTCTL:
1567 * For a detailed explanation on the following bit
1568 * manipulation code see comments in a similar part of
1569 * usdhc_write()
1571 hostctl1 = SDHC_DMA_TYPE(s->hostctl1) << (8 - 3);
1573 if (s->hostctl1 & SDHC_CTRL_8BITBUS) {
1574 hostctl1 |= ESDHC_CTRL_8BITBUS;
1577 if (s->hostctl1 & SDHC_CTRL_4BITBUS) {
1578 hostctl1 |= ESDHC_CTRL_4BITBUS;
1581 ret = hostctl1;
1582 ret |= (uint32_t)s->blkgap << 16;
1583 ret |= (uint32_t)s->wakcon << 24;
1585 break;
1587 case SDHC_PRNSTS:
1588 /* Add SDSTB (SD Clock Stable) bit to PRNSTS */
1589 ret = sdhci_read(opaque, offset, size) & ~ESDHC_PRNSTS_SDSTB;
1590 if (s->clkcon & SDHC_CLOCK_INT_STABLE) {
1591 ret |= ESDHC_PRNSTS_SDSTB;
1593 break;
1595 case ESDHC_VENDOR_SPEC:
1596 ret = s->vendor_spec;
1597 break;
1598 case ESDHC_DLL_CTRL:
1599 case ESDHC_TUNE_CTRL_STATUS:
1600 case ESDHC_UNDOCUMENTED_REG27:
1601 case ESDHC_TUNING_CTRL:
1602 case ESDHC_MIX_CTRL:
1603 case ESDHC_WTMK_LVL:
1604 ret = 0;
1605 break;
1608 return ret;
1611 static void
1612 usdhc_write(void *opaque, hwaddr offset, uint64_t val, unsigned size)
1614 SDHCIState *s = SYSBUS_SDHCI(opaque);
1615 uint8_t hostctl1;
1616 uint32_t value = (uint32_t)val;
1618 switch (offset) {
1619 case ESDHC_DLL_CTRL:
1620 case ESDHC_TUNE_CTRL_STATUS:
1621 case ESDHC_UNDOCUMENTED_REG27:
1622 case ESDHC_TUNING_CTRL:
1623 case ESDHC_WTMK_LVL:
1624 break;
1626 case ESDHC_VENDOR_SPEC:
1627 s->vendor_spec = value;
1628 switch (s->vendor) {
1629 case SDHCI_VENDOR_IMX:
1630 if (value & ESDHC_IMX_FRC_SDCLK_ON) {
1631 s->prnsts &= ~SDHC_IMX_CLOCK_GATE_OFF;
1632 } else {
1633 s->prnsts |= SDHC_IMX_CLOCK_GATE_OFF;
1635 break;
1636 default:
1637 break;
1639 break;
1641 case SDHC_HOSTCTL:
1643 * Here's What ESDHCI has at offset 0x28 (SDHC_HOSTCTL)
1645 * 7 6 5 4 3 2 1 0
1646 * |-----------+--------+--------+-----------+----------+---------|
1647 * | Card | Card | Endian | DATA3 | Data | Led |
1648 * | Detect | Detect | Mode | as Card | Transfer | Control |
1649 * | Signal | Test | | Detection | Width | |
1650 * | Selection | Level | | Pin | | |
1651 * |-----------+--------+--------+-----------+----------+---------|
1653 * and 0x29
1655 * 15 10 9 8
1656 * |----------+------|
1657 * | Reserved | DMA |
1658 * | | Sel. |
1659 * | | |
1660 * |----------+------|
         * and here's what the SDHCI spec expects those offsets to be:
1664 * 0x28 (Host Control Register)
1666 * 7 6 5 4 3 2 1 0
1667 * |--------+--------+----------+------+--------+----------+---------|
1668 * | Card | Card | Extended | DMA | High | Data | LED |
1669 * | Detect | Detect | Data | Sel. | Speed | Transfer | Control |
1670 * | Signal | Test | Transfer | | Enable | Width | |
1671 * | Sel. | Level | Width | | | | |
1672 * |--------+--------+----------+------+--------+----------+---------|
1674 * and 0x29 (Power Control Register)
1676 * |----------------------------------|
1677 * | Power Control Register |
1678 * | |
1679 * | Description omitted, |
1680 * | since it has no analog in ESDHCI |
1681 * | |
1682 * |----------------------------------|
         * Since offsets 0x2A and 0x2B should be compatible between
         * both IP specs we only need to reconcile the least significant
         * 16 bits of the word we've been given.
1690 * First, save bits 7 6 and 0 since they are identical
1692 hostctl1 = value & (SDHC_CTRL_LED |
1693 SDHC_CTRL_CDTEST_INS |
1694 SDHC_CTRL_CDTEST_EN);
        /*
         * Second, split "Data Transfer Width" from bits 2 and 1 into
         * bits 5 and 1
         */
1699 if (value & ESDHC_CTRL_8BITBUS) {
1700 hostctl1 |= SDHC_CTRL_8BITBUS;
1703 if (value & ESDHC_CTRL_4BITBUS) {
1704 hostctl1 |= ESDHC_CTRL_4BITBUS;
1708 * Third, move DMA select from bits 9 and 8 to bits 4 and 3
1710 hostctl1 |= SDHC_DMA_TYPE(value >> (8 - 3));
1713 * Now place the corrected value into low 16-bit of the value
1714 * we are going to give standard SDHCI write function
1716 * NOTE: This transformation should be the inverse of what can
1717 * be found in drivers/mmc/host/sdhci-esdhc-imx.c in Linux
1718 * kernel
1720 value &= ~UINT16_MAX;
1721 value |= hostctl1;
1722 value |= (uint16_t)s->pwrcon << 8;
1724 sdhci_write(opaque, offset, value, size);
1725 break;
1727 case ESDHC_MIX_CTRL:
1729 * So, when SD/MMC stack in Linux tries to write to "Transfer
1730 * Mode Register", ESDHC i.MX quirk code will translate it
1731 * into a write to ESDHC_MIX_CTRL, so we do the opposite in
1732 * order to get where we started
1734 * Note that Auto CMD23 Enable bit is located in a wrong place
1735 * on i.MX, but since it is not used by QEMU we do not care.
1737 * We don't want to call sdhci_write(.., SDHC_TRNMOD, ...)
         * here because it will result in a call to
1739 * sdhci_send_command(s) which we don't want.
1742 s->trnmod = value & UINT16_MAX;
1743 break;
1744 case SDHC_TRNMOD:
1746 * Similar to above, but this time a write to "Command
1747 * Register" will be translated into a 4-byte write to
1748 * "Transfer Mode register" where lower 16-bit of value would
1749 * be set to zero. So what we do is fill those bits with
1750 * cached value from s->trnmod and let the SDHCI
1751 * infrastructure handle the rest
1753 sdhci_write(opaque, offset, val | s->trnmod, size);
1754 break;
1755 case SDHC_BLKSIZE:
1757 * ESDHCI does not implement "Host SDMA Buffer Boundary", and
1758 * Linux driver will try to zero this field out which will
1759 * break the rest of SDHCI emulation.
1761 * Linux defaults to maximum possible setting (512K boundary)
1762 * and it seems to be the only option that i.MX IP implements,
1763 * so we artificially set it to that value.
1765 val |= 0x7 << 12;
1766 /* FALLTHROUGH */
1767 default:
1768 sdhci_write(opaque, offset, val, size);
1769 break;
1773 static const MemoryRegionOps usdhc_mmio_ops = {
1774 .read = usdhc_read,
1775 .write = usdhc_write,
1776 .valid = {
1777 .min_access_size = 1,
1778 .max_access_size = 4,
1779 .unaligned = false
1781 .endianness = DEVICE_LITTLE_ENDIAN,
1784 static void imx_usdhc_init(Object *obj)
1786 SDHCIState *s = SYSBUS_SDHCI(obj);
1788 s->io_ops = &usdhc_mmio_ops;
1789 s->quirks = SDHCI_QUIRK_NO_BUSY_IRQ;
1792 static const TypeInfo imx_usdhc_info = {
1793 .name = TYPE_IMX_USDHC,
1794 .parent = TYPE_SYSBUS_SDHCI,
1795 .instance_init = imx_usdhc_init,
1798 /* --- qdev Samsung s3c --- */
1800 #define S3C_SDHCI_CONTROL2 0x80
1801 #define S3C_SDHCI_CONTROL3 0x84
1802 #define S3C_SDHCI_CONTROL4 0x8c
1804 static uint64_t sdhci_s3c_read(void *opaque, hwaddr offset, unsigned size)
1806 uint64_t ret;
1808 switch (offset) {
1809 case S3C_SDHCI_CONTROL2:
1810 case S3C_SDHCI_CONTROL3:
1811 case S3C_SDHCI_CONTROL4:
1812 /* ignore */
1813 ret = 0;
1814 break;
1815 default:
1816 ret = sdhci_read(opaque, offset, size);
1817 break;
1820 return ret;
1823 static void sdhci_s3c_write(void *opaque, hwaddr offset, uint64_t val,
1824 unsigned size)
1826 switch (offset) {
1827 case S3C_SDHCI_CONTROL2:
1828 case S3C_SDHCI_CONTROL3:
1829 case S3C_SDHCI_CONTROL4:
1830 /* ignore */
1831 break;
1832 default:
1833 sdhci_write(opaque, offset, val, size);
1834 break;
1838 static const MemoryRegionOps sdhci_s3c_mmio_ops = {
1839 .read = sdhci_s3c_read,
1840 .write = sdhci_s3c_write,
1841 .valid = {
1842 .min_access_size = 1,
1843 .max_access_size = 4,
1844 .unaligned = false
1846 .endianness = DEVICE_LITTLE_ENDIAN,
1849 static void sdhci_s3c_init(Object *obj)
1851 SDHCIState *s = SYSBUS_SDHCI(obj);
1853 s->io_ops = &sdhci_s3c_mmio_ops;
1856 static const TypeInfo sdhci_s3c_info = {
    .name = TYPE_S3C_SDHCI,
1858 .parent = TYPE_SYSBUS_SDHCI,
1859 .instance_init = sdhci_s3c_init,
1862 static void sdhci_register_types(void)
1864 type_register_static(&sdhci_sysbus_info);
1865 type_register_static(&sdhci_bus_info);
1866 type_register_static(&imx_usdhc_info);
1867 type_register_static(&sdhci_s3c_info);
1870 type_init(sdhci_register_types)