/*
 * Shared part of driver for MMC/SDHC controller on Cavium OCTEON and
 * ThunderX SOCs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012-2017 Cavium Inc.
 *
 * Authors:
 *   David Daney <david.daney@cavium.com>
 *   Peter Swain <pswain@cavium.com>
 *   Steven J. Hill <steven.hill@cavium.com>
 *   Jan Glauber <jglauber@cavium.com>
 */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/time.h>

#include "cavium.h"
const char *cvm_mmc_irq_names[] = {
/*
 * The Cavium MMC host hardware assumes that all commands have fixed
 * command and response types. These are correct if MMC devices are
 * being used. However, non-MMC devices like SD use command and
 * response types that are unexpected by the host hardware.
 *
 * The command and response types can be overridden by supplying an
 * XOR value that is applied to the type. We calculate the XOR value
 * from the values in this table and the flags passed from the MMC
 * core.
 */
static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
        {0xff, 0xff},           /* CMD56 */
static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
{
        struct cvm_mmc_cr_type *cr;
        u8 hardware_ctype, hardware_rtype;
        u8 desired_ctype = 0, desired_rtype = 0;
        struct cvm_mmc_cr_mods r;

        cr = cvm_mmc_cr_types + (cmd->opcode & 0x3f);
        hardware_ctype = cr->ctype;
        hardware_rtype = cr->rtype;
        if (cmd->opcode == MMC_GEN_CMD)
                hardware_ctype = (cmd->arg & 1) ? 1 : 2;

        switch (mmc_cmd_type(cmd)) {
        case MMC_CMD_ADTC:
                desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
                break;
        }

        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_R1: /* MMC_RSP_R5, MMC_RSP_R6, MMC_RSP_R7 */
                desired_rtype = 1;
                break;
        case MMC_RSP_R3: /* MMC_RSP_R4 */
                desired_rtype = 3;
                break;
        }

        r.ctype_xor = desired_ctype ^ hardware_ctype;
        r.rtype_xor = desired_rtype ^ hardware_rtype;
        return r;
}
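
/*
 * A worked example of the override, assuming a table entry that types a
 * command as rtype 1 (R1): if the core instead asks for an R3-class
 * response (rtype 3), the code above yields rtype_xor = 1 ^ 3 = 2, which
 * is later folded into the command word via MIO_EMM_CMD_RTYPE_XOR so the
 * controller decodes the response the card will actually send.
 */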

static void check_switch_errors(struct cvm_mmc_host *host)
{
        u64 emm_switch;

        emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
        if (emm_switch & MIO_EMM_SWITCH_ERR0)
                dev_err(host->dev, "Switch power class error\n");
        if (emm_switch & MIO_EMM_SWITCH_ERR1)
                dev_err(host->dev, "Switch hs timing error\n");
        if (emm_switch & MIO_EMM_SWITCH_ERR2)
                dev_err(host->dev, "Switch bus width error\n");
}

static void clear_bus_id(u64 *reg)
{
        u64 bus_id_mask = GENMASK_ULL(61, 60);

        *reg &= ~bus_id_mask;
}

static void set_bus_id(u64 *reg, int bus_id)
{
        clear_bus_id(reg);
        *reg |= FIELD_PREP(GENMASK_ULL(61, 60), bus_id);
}

static int get_bus_id(u64 reg)
{
        return FIELD_GET(GENMASK_ULL(61, 60), reg);
}

/*
 * We never set the switch_exe bit since that would interfere
 * with the commands sent by the MMC core.
 */
static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
{
        int retries = 100;
        u64 rsp_sts;
        int bus_id;

        /*
         * Mode settings are only taken from slot 0. Work around that
         * hardware issue by first switching to slot 0.
         */
        bus_id = get_bus_id(emm_switch);
        clear_bus_id(&emm_switch);
        writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

        set_bus_id(&emm_switch, bus_id);
        writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

        /* wait for the switch to finish */
        do {
                rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
                if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
                        break;
                udelay(10);
        } while (--retries);

        check_switch_errors(host);
}

static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
{
        /* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
        u64 match = 0x3001070fffffffffull;

        return (slot->cached_switch & match) != (new_val & match);
}

static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
{
        u64 timeout;

        if (ns)
                timeout = (slot->clock * ns) / NSEC_PER_SEC;
        else
                timeout = (slot->clock * 850ull) / 1000ull;
        writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
}
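
/*
 * For example, with slot->clock at 52000000 Hz and a 100 ms data timeout
 * (ns = 100000000), the watchdog is loaded with roughly
 * 52000000 * 100000000 / NSEC_PER_SEC = 5200000 controller clocks; with
 * no timeout given, the 850/1000 fallback corresponds to 850 ms worth of
 * clocks.
 */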

static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
{
        struct cvm_mmc_host *host = slot->host;
        u64 emm_switch, wdog;

        emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
        emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
                        MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
        set_bus_id(&emm_switch, slot->bus_id);

        wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
        do_switch(slot->host, emm_switch);

        slot->cached_switch = emm_switch;

        writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
}

/* Switch to another slot if needed */
static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
        struct cvm_mmc_host *host = slot->host;
        struct cvm_mmc_slot *old_slot;
        u64 emm_sample, emm_switch;

        if (slot->bus_id == host->last_slot)
                return;

        if (host->last_slot >= 0 && host->slot[host->last_slot]) {
                old_slot = host->slot[host->last_slot];
                old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
                old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
        }

        writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
        emm_switch = slot->cached_switch;
        set_bus_id(&emm_switch, slot->bus_id);
        do_switch(host, emm_switch);

        emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
                     FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
        writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));

        host->last_slot = slot->bus_id;
}

static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
                    u64 dbuf)
{
        struct sg_mapping_iter *smi = &host->smi;
        int data_len = req->data->blocks * req->data->blksz;
        int bytes_xfered, shift = -1;
        u64 dat = 0;

        /* Auto inc from offset zero */
        writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));

        for (bytes_xfered = 0; bytes_xfered < data_len;) {
                if (smi->consumed >= smi->length) {
                        if (!sg_miter_next(smi))
                                break;
                        smi->consumed = 0;
                }

                if (shift < 0) {
                        dat = readq(host->base + MIO_EMM_BUF_DAT(host));
                        shift = 56;
                }

                while (smi->consumed < smi->length && shift >= 0) {
                        ((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
                        bytes_xfered++;
                        smi->consumed++;
                        shift -= 8;
                }
        }

        sg_miter_stop(smi);
        req->data->bytes_xfered = bytes_xfered;
        req->data->error = 0;
}
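
/*
 * Note that do_read() above unpacks each BUF_DAT word most-significant
 * byte first (shift starts at 56 and steps down by 8), so the eight bytes
 * of every 64-bit read land in the scatterlist in big-endian order.
 */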

static void do_write(struct mmc_request *req)
{
        req->data->bytes_xfered = req->data->blocks * req->data->blksz;
        req->data->error = 0;
}

static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
                             u64 rsp_sts)
{
        u64 rsp_hi, rsp_lo;

        if (!(rsp_sts & MIO_EMM_RSP_STS_RSP_VAL))
                return;

        rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));

        switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) {
        case 1:
        case 3:
                req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
                req->cmd->resp[1] = 0;
                req->cmd->resp[2] = 0;
                req->cmd->resp[3] = 0;
                break;
        case 2:
                req->cmd->resp[3] = rsp_lo & 0xffffffff;
                req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
                rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
                req->cmd->resp[1] = rsp_hi & 0xffffffff;
                req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
                break;
        }
}
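
/*
 * Response types 1 and 3 above are the short 48-bit formats, where the
 * 32-bit payload sits above the CRC byte in RSP_LO (hence the >> 8);
 * type 2 is the long 136-bit CID/CSD format spread across RSP_LO and
 * RSP_HI.
 */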

static int get_dma_dir(struct mmc_data *data)
{
        return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}

static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
        data->bytes_xfered = data->blocks * data->blksz;
        data->error = 0;
        dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
        return 1;
}

static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
        u64 fifo_cfg;
        int count;

        /* Check if there are any pending requests left */
        fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
        count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
        if (count)
                dev_err(host->dev, "%u requests still pending\n", count);

        data->bytes_xfered = data->blocks * data->blksz;
        data->error = 0;

        /* Clear and disable FIFO */
        writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
        dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
        return 1;
}

static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
        if (host->use_sg && data->sg_len > 1)
                return finish_dma_sg(host, data);
        else
                return finish_dma_single(host, data);
}

static int check_status(u64 rsp_sts)
{
        if (rsp_sts & MIO_EMM_RSP_STS_RSP_BAD_STS ||
            rsp_sts & MIO_EMM_RSP_STS_RSP_CRC_ERR ||
            rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
                return -EILSEQ;
        if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
            rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
                return -ETIMEDOUT;
        if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
                return -EIO;
        return 0;
}

/* Try to clean up failed DMA. */
static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
{
        u64 emm_dma;

        emm_dma = readq(host->base + MIO_EMM_DMA(host));
        emm_dma |= FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
                   FIELD_PREP(MIO_EMM_DMA_DAT_NULL, 1);
        set_bus_id(&emm_dma, get_bus_id(rsp_sts));
        writeq(emm_dma, host->base + MIO_EMM_DMA(host));
}

irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
        struct cvm_mmc_host *host = dev_id;
        struct mmc_request *req;
        unsigned long flags = 0;
        u64 emm_int, rsp_sts;
        bool host_done;

        if (host->need_irq_handler_lock)
                spin_lock_irqsave(&host->irq_handler_lock, flags);
        else
                __acquire(&host->irq_handler_lock);

        /* Clear interrupt bits (write 1 clears). */
        emm_int = readq(host->base + MIO_EMM_INT(host));
        writeq(emm_int, host->base + MIO_EMM_INT(host));

        if (emm_int & MIO_EMM_INT_SWITCH_ERR)
                check_switch_errors(host);

        req = host->current_req;
        if (!req)
                goto out;

        rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
        /*
         * dma_val set means DMA is still in progress. Don't touch
         * the request and wait for the interrupt indicating that
         * the DMA is finished.
         */
        if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
                goto out;

        if (!host->dma_active && req->data &&
            (emm_int & MIO_EMM_INT_BUF_DONE)) {
                unsigned int type = (rsp_sts >> 7) & 3;

                if (type == 1)
                        do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
                else if (type == 2)
                        do_write(req);
        }

        host_done = emm_int & MIO_EMM_INT_CMD_DONE ||
                    emm_int & MIO_EMM_INT_DMA_DONE ||
                    emm_int & MIO_EMM_INT_CMD_ERR ||
                    emm_int & MIO_EMM_INT_DMA_ERR;

        if (!(host_done && req->done))
                goto no_req_done;

        req->cmd->error = check_status(rsp_sts);

        if (host->dma_active && req->data)
                if (!finish_dma(host, req->data))
                        goto no_req_done;

        set_cmd_response(host, req, rsp_sts);
        if ((emm_int & MIO_EMM_INT_DMA_ERR) &&
            (rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
                cleanup_dma(host, rsp_sts);

        host->current_req = NULL;
        req->done(req);

no_req_done:
        if (host->dmar_fixup_done)
                host->dmar_fixup_done(host);
        if (host_done)
                host->release_bus(host);
out:
        if (host->need_irq_handler_lock)
                spin_unlock_irqrestore(&host->irq_handler_lock, flags);
        else
                __release(&host->irq_handler_lock);
        return IRQ_RETVAL(emm_int != 0);
}
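
/*
 * The handler above serves both transfer paths: BUF_DONE interrupts are
 * completed by PIO through the internal buffer (do_read/do_write, with
 * bits 8:7 of RSP_STS giving the transfer type), while DMA completions
 * and errors are wrapped up via finish_dma() and, if needed,
 * cleanup_dma().
 */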

/*
 * Program DMA_CFG and if needed DMA_ADR.
 * Returns 0 on error, DMA address otherwise.
 */
static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
        u64 dma_cfg, addr;
        int count, rw;

        count = dma_map_sg(host->dev, data->sg, data->sg_len,
                           get_dma_dir(data));
        if (!count)
                return 0;

        rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
        dma_cfg = FIELD_PREP(MIO_EMM_DMA_CFG_EN, 1) |
                  FIELD_PREP(MIO_EMM_DMA_CFG_RW, rw);
#ifdef __LITTLE_ENDIAN
        dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ENDIAN, 1);
#endif
        dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE,
                              (sg_dma_len(&data->sg[0]) / 8) - 1);

        addr = sg_dma_address(&data->sg[0]);
        if (!host->big_dma_addr)
                dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr);
        writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

        pr_debug("[%s] sg_dma_len: %u total sg_elem: %d\n",
                 (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count);

        if (host->big_dma_addr)
                writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));
        return addr;
}
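
/*
 * DMA_CFG_SIZE is expressed in 64-bit words minus one, so a single
 * 4096-byte segment, for instance, is programmed as 4096 / 8 - 1 = 511.
 */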

/*
 * Queue the complete sg list into the FIFO.
 * Returns 0 on error, 1 otherwise.
 */
static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
        struct scatterlist *sg;
        u64 fifo_cmd, addr;
        int count, i, rw;

        count = dma_map_sg(host->dev, data->sg, data->sg_len,
                           get_dma_dir(data));
        if (!count)
                return 0;
        if (count > 16)
                goto error;

        /* Enable FIFO by removing CLR bit */
        writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));

        for_each_sg(data->sg, sg, count, i) {
                /* Program DMA address */
                addr = sg_dma_address(sg);
                if (addr & 7)
                        goto error;
                writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));

                /*
                 * If we have scatter-gather support we also have an extra
                 * register for the DMA addr, so no need to check
                 * host->big_dma_addr here.
                 */
                rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
                fifo_cmd = FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_RW, rw);

                /* enable interrupts on the last element */
                fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_INTDIS,
                                       (i + 1 == count) ? 0 : 1);

#ifdef __LITTLE_ENDIAN
                fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_ENDIAN, 1);
#endif
                fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE,
                                       sg_dma_len(sg) / 8 - 1);
                /*
                 * The write copies the address and the command to the FIFO
                 * and increments the FIFO's COUNT field.
                 */
                writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
                pr_debug("[%s] sg_dma_len: %u sg_elem: %d/%d\n",
                         (rw) ? "W" : "R", sg_dma_len(sg), i, count);
        }

        /*
         * Unlike prepare_dma_single we don't return the address here,
         * as it would not make sense for scatter-gather. The dma fixup
         * is only required on models that don't support scatter-gather,
         * so that is not a problem.
         */
        return 1;

error:
        dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
        /* Disable FIFO */
        writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
        return 0;
}
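
/*
 * Each MIO_EMM_DMA_FIFO_CMD write in the loop above queues one
 * address/command pair; because INTDIS is set on every element except
 * the last, only the final segment of the list raises a DMA interrupt
 * when it completes.
 */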

static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
        if (host->use_sg && data->sg_len > 1)
                return prepare_dma_sg(host, data);
        else
                return prepare_dma_single(host, data);
}

static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        u64 emm_dma;

        emm_dma = FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
                  FIELD_PREP(MIO_EMM_DMA_SECTOR,
                             mmc_card_is_blockaddr(mmc->card) ? 1 : 0) |
                  FIELD_PREP(MIO_EMM_DMA_RW,
                             (mrq->data->flags & MMC_DATA_WRITE) ? 1 : 0) |
                  FIELD_PREP(MIO_EMM_DMA_BLOCK_CNT, mrq->data->blocks) |
                  FIELD_PREP(MIO_EMM_DMA_CARD_ADDR, mrq->cmd->arg);
        set_bus_id(&emm_dma, slot->bus_id);

        if (mmc_card_mmc(mmc->card) || (mmc_card_sd(mmc->card) &&
            (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
                emm_dma |= FIELD_PREP(MIO_EMM_DMA_MULTI, 1);

        pr_debug("[%s] blocks: %u multi: %d\n",
                 (emm_dma & MIO_EMM_DMA_RW) ? "W" : "R",
                 mrq->data->blocks, (emm_dma & MIO_EMM_DMA_MULTI) ? 1 : 0);
        return emm_dma;
}

static void cvm_mmc_dma_request(struct mmc_host *mmc,
                                struct mmc_request *mrq)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        struct cvm_mmc_host *host = slot->host;
        struct mmc_data *data;
        u64 emm_dma, addr;

        if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
            !mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
                dev_err(&mmc->card->dev,
                        "Error: cvm_mmc_dma_request no data\n");
                goto error;
        }

        cvm_mmc_switch_to(slot);

        data = mrq->data;
        pr_debug("DMA request blocks: %d block_size: %d total_size: %d\n",
                 data->blocks, data->blksz, data->blocks * data->blksz);
        if (data->timeout_ns)
                set_wdog(slot, data->timeout_ns);

        WARN_ON(host->current_req);
        host->current_req = mrq;

        emm_dma = prepare_ext_dma(mmc, mrq);
        addr = prepare_dma(host, data);
        if (!addr) {
                dev_err(host->dev, "prepare_dma failed\n");
                goto error;
        }

        host->dma_active = true;
        host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
                         MIO_EMM_INT_DMA_ERR);

        if (host->dmar_fixup)
                host->dmar_fixup(host, mrq->cmd, data, addr);

        /*
         * If we have a valid SD card in the slot, we set the response
         * bit mask to check for CRC errors and timeouts only.
         * Otherwise, use the default power reset value.
         */
        if (mmc_card_sd(mmc->card))
                writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
        else
                writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
        writeq(emm_dma, host->base + MIO_EMM_DMA(host));
        return;

error:
        mrq->cmd->error = -EINVAL;
        if (mrq->done)
                mrq->done(mrq);
        host->release_bus(host);
}

static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
        sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
                       SG_MITER_ATOMIC | SG_MITER_TO_SG);
}

static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
        unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
        struct sg_mapping_iter *smi = &host->smi;
        unsigned int bytes_xfered;
        int shift = 56;
        u64 dat = 0;

        /* Copy data to the xmit buffer before issuing the command. */
        sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);

        /* Auto inc from offset zero, dbuf zero */
        writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));

        for (bytes_xfered = 0; bytes_xfered < data_len;) {
                if (smi->consumed >= smi->length) {
                        if (!sg_miter_next(smi))
                                break;
                        smi->consumed = 0;
                }

                while (smi->consumed < smi->length && shift >= 0) {
                        dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
                        bytes_xfered++;
                        smi->consumed++;
                        shift -= 8;
                }

                if (shift < 0) {
                        writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
                        shift = 56;
                        dat = 0;
                }
        }
        sg_miter_stop(smi);
}

static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        struct cvm_mmc_host *host = slot->host;
        struct mmc_command *cmd = mrq->cmd;
        struct cvm_mmc_cr_mods mods;
        u64 emm_cmd, rsp_sts;
        int retries = 100;

        /*
         * Note about locking:
         * All MMC devices share the same bus and controller. Allow only a
         * single user of the bootbus/MMC bus at a time. The lock is acquired
         * on all entry points from the MMC layer.
         *
         * For requests the lock is only released after the completion
         * interrupt.
         */
        host->acquire_bus(host);

        if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
            cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
                return cvm_mmc_dma_request(mmc, mrq);

        cvm_mmc_switch_to(slot);

        mods = cvm_mmc_get_cr_mods(cmd);

        WARN_ON(host->current_req);
        host->current_req = mrq;

        if (cmd->data) {
                if (cmd->data->flags & MMC_DATA_READ)
                        do_read_request(host, mrq);
                else
                        do_write_request(host, mrq);

                if (cmd->data->timeout_ns)
                        set_wdog(slot, cmd->data->timeout_ns);
        }

        host->dma_active = false;
        host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);

        emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
                  FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
                  FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
                  FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) |
                  FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg);
        set_bus_id(&emm_cmd, slot->bus_id);
        if (cmd->data && mmc_cmd_type(cmd) == MMC_CMD_ADTC)
                emm_cmd |= FIELD_PREP(MIO_EMM_CMD_OFFSET,
                                64 - ((cmd->data->blocks * cmd->data->blksz) / 8));

        writeq(0, host->base + MIO_EMM_STS_MASK(host));

retry:
        rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
        if (rsp_sts & MIO_EMM_RSP_STS_DMA_VAL ||
            rsp_sts & MIO_EMM_RSP_STS_CMD_VAL ||
            rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL ||
            rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) {
                udelay(10);
                if (--retries)
                        goto retry;
        }
        if (!retries)
                dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
        writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
}

static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        struct cvm_mmc_host *host = slot->host;
        int clk_period = 0, power_class = 10, bus_width = 0;
        u64 clock, emm_switch;

        host->acquire_bus(host);
        cvm_mmc_switch_to(slot);

        /* Set the power state */
        switch (ios->power_mode) {
        case MMC_POWER_ON:
                break;

        case MMC_POWER_OFF:
                cvm_mmc_reset_bus(slot);
                if (host->global_pwr_gpiod)
                        host->set_shared_power(host, 0);
                else if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
                break;

        case MMC_POWER_UP:
                if (host->global_pwr_gpiod)
                        host->set_shared_power(host, 1);
                else if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
                break;
        }

        /* Convert bus width to HW definition */
        switch (ios->bus_width) {
        case MMC_BUS_WIDTH_8:
                bus_width = 2;
                break;
        case MMC_BUS_WIDTH_4:
                bus_width = 1;
                break;
        case MMC_BUS_WIDTH_1:
                bus_width = 0;
                break;
        }

        /* DDR is available for 4/8 bit bus width */
        if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
                bus_width |= 4;

        /* Change the clock frequency. */
        clock = ios->clock;
        if (clock > 52000000)
                clock = 52000000;
        slot->clock = clock;

        if (ios->clock)
                clk_period = (host->sys_freq + clock - 1) / (2 * clock);
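
        /*
         * CLK_HI/CLK_LO each hold half the bus-clock period in sys_freq
         * cycles; for example, an 800 MHz sys_freq with a 52 MHz request
         * gives clk_period = 8 here, i.e. an actual bus clock of
         * 800 MHz / (2 * 8) = 50 MHz.
         */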

        emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
                                (ios->timing == MMC_TIMING_MMC_HS)) |
                     FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
                     FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
                     FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
                     FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
        set_bus_id(&emm_switch, slot->bus_id);

        if (!switch_val_changed(slot, emm_switch))
                goto out;

        do_switch(host, emm_switch);
        slot->cached_switch = emm_switch;
out:
        host->release_bus(host);
}

static const struct mmc_host_ops cvm_mmc_ops = {
        .request        = cvm_mmc_request,
        .set_ios        = cvm_mmc_set_ios,
        .get_ro         = mmc_gpio_get_ro,
        .get_cd         = mmc_gpio_get_cd,
};

static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
{
        struct mmc_host *mmc = slot->mmc;

        clock = min(clock, mmc->f_max);
        clock = max(clock, mmc->f_min);
        slot->clock = clock;
}

static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
{
        struct cvm_mmc_host *host = slot->host;
        u64 emm_switch;

        /* Enable this bus slot. */
        host->emm_cfg |= (1ull << slot->bus_id);
        writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));

        /* Program initial clock speed and power. */
        cvm_mmc_set_clock(slot, slot->mmc->f_min);
        emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10);
        emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
                                 (host->sys_freq / slot->clock) / 2);
        emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
                                 (host->sys_freq / slot->clock) / 2);

        /* Make the changes take effect on this bus slot. */
        set_bus_id(&emm_switch, slot->bus_id);
        do_switch(host, emm_switch);

        slot->cached_switch = emm_switch;

        /*
         * Set watchdog timeout value and default reset value
         * for the mask register. Finally, set the CARD_RCA
         * bit so that we can get the card address relative
         * to the CMD register for CMD7 transactions.
         */
        set_wdog(slot, 0);
        writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
        writeq(1, host->base + MIO_EMM_RCA(host));
        return 0;
}

static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
{
        u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
        struct device_node *node = dev->of_node;
        struct mmc_host *mmc = slot->mmc;
        u64 clock_period;
        int ret;

        ret = of_property_read_u32(node, "reg", &id);
        if (ret) {
                dev_err(dev, "Missing or invalid reg property on %pOF\n", node);
                return ret;
        }

        if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
                dev_err(dev, "Invalid reg property on %pOF\n", node);
                return -EINVAL;
        }

        ret = mmc_regulator_get_supply(mmc);
        if (ret)
                return ret;
        /*
         * Legacy Octeon firmware has no regulator entry, fall-back to
         * a hard-coded voltage to get a sane OCR.
         */
        if (IS_ERR(mmc->supply.vmmc))
                mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

        /* Common MMC bindings */
        ret = mmc_of_parse(mmc);
        if (ret)
                return ret;

        /* Set bus width */
        if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
                of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
                if (bus_width == 8)
                        mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
                else if (bus_width == 4)
                        mmc->caps |= MMC_CAP_4_BIT_DATA;
        }

        /* Set maximum and minimum frequency */
        if (!mmc->f_max)
                of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
        if (!mmc->f_max || mmc->f_max > 52000000)
                mmc->f_max = 52000000;
        mmc->f_min = 400000;

        /* Sampling register settings, period in picoseconds */
        clock_period = 1000000000000ull / slot->host->sys_freq;
        of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
        of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
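        /*
         * clock_period above is the controller input clock period in
         * picoseconds, so e.g. an 800 MHz sys_freq gives 1250 ps and a
         * "cavium,dat-clk-skew" of 2500 ps rounds to dat_cnt = 2 below,
         * the value later written to MIO_EMM_SAMPLE.
         */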
        slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
        slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;

        return id;
}

int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
{
        struct cvm_mmc_slot *slot;
        struct mmc_host *mmc;
        int ret, id;

        mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
        if (!mmc)
                return -ENOMEM;

        slot = mmc_priv(mmc);
        slot->mmc = mmc;
        slot->host = host;

        ret = cvm_mmc_of_parse(dev, slot);
        if (ret < 0)
                goto error;
        id = ret;

        /* Set up host parameters */
        mmc->ops = &cvm_mmc_ops;

        /*
         * We only have a 3.3v supply, so we cannot support any
         * of the UHS modes. We do support the high speed DDR
         * modes up to 52MHz.
         *
         * Disable bounce buffers for max_segs = 1
         */
        mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
                     MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | MMC_CAP_3_3V_DDR;

        /* DMA size field can address up to 8 MB */
        mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
                                  dma_get_max_seg_size(host->dev));
        mmc->max_req_size = mmc->max_seg_size;
        /* External DMA is in 512 byte blocks */
        mmc->max_blk_size = 512;
        /* DMA block count field is 15 bits */
        mmc->max_blk_count = 32767;

        slot->clock = mmc->f_min;
        slot->bus_id = id;
        slot->cached_rca = 1;

        host->acquire_bus(host);
        host->slot[id] = slot;
        cvm_mmc_switch_to(slot);
        cvm_mmc_init_lowlevel(slot);
        host->release_bus(host);

        ret = mmc_add_host(mmc);
        if (ret) {
                dev_err(dev, "mmc_add_host() returned %d\n", ret);
                slot->host->slot[id] = NULL;
                goto error;
        }
        return 0;

error:
        mmc_free_host(slot->mmc);
        return ret;
}
*slot
)
1082 mmc_remove_host(slot
->mmc
);
1083 slot
->host
->slot
[slot
->bus_id
] = NULL
;
1084 mmc_free_host(slot
->mmc
);