// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
	struct mmc_request *mrq;
	unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
#define CQHCI_COMPLETED		BIT(1)
#define CQHCI_HOST_CRC		BIT(2)
#define CQHCI_HOST_TIMEOUT	BIT(3)
#define CQHCI_HOST_OTHER	BIT(4)
};
static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *desc = get_desc(cq_host, tag);

	return desc + cq_host->task_desc_len;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_dma_base +
		(cq_host->mmc->max_segs * tag *
		 cq_host->trans_desc_len);
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_base +
		(cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
}
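/*
 * Illustrative arithmetic (example values, not mandated by the spec; actual
 * sizes depend on the host configuration): with task_desc_len = 16 and
 * link_desc_len = 16, slot_sz is 32 bytes, so the task descriptor for tag 3
 * sits at offset 3 * 32 = 96 in desc_base and its link descriptor 16 bytes
 * after that.  The matching transfer-descriptor list for tag 3 starts at
 * offset trans_desc_len * max_segs * 3 in trans_desc_base.
 */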
static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *link_temp;
	dma_addr_t trans_temp;

	link_temp = get_link_desc(cq_host, tag);
	trans_temp = get_trans_desc_dma(cq_host, tag);

	memset(link_temp, 0, cq_host->link_desc_len);
	if (cq_host->link_desc_len > 8)
		*(link_temp + 8) = 0;

	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
		return;
	}

	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

	if (cq_host->dma64) {
		__le64 *data_addr = (__le64 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le64(trans_temp);
	} else {
		__le32 *data_addr = (__le32 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le32(trans_temp);
	}
}
static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
	cqhci_writel(cq_host, set, CQHCI_ISTE);
	cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)
static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;

	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

	CQHCI_DUMP("Caps:      0x%08x | Version:  0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CAP),
		   cqhci_readl(cq_host, CQHCI_VER));
	CQHCI_DUMP("Config:    0x%08x | Control:  0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CFG),
		   cqhci_readl(cq_host, CQHCI_CTL));
	CQHCI_DUMP("Int stat:  0x%08x | Int enab: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_IS),
		   cqhci_readl(cq_host, CQHCI_ISTE));
	CQHCI_DUMP("Int sig:   0x%08x | Int Coal: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_ISGE),
		   cqhci_readl(cq_host, CQHCI_IC));
	CQHCI_DUMP("TDL base:  0x%08x | TDL up32: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDLBA),
		   cqhci_readl(cq_host, CQHCI_TDLBAU));
	CQHCI_DUMP("Doorbell:  0x%08x | TCN:      0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDBR),
		   cqhci_readl(cq_host, CQHCI_TCN));
	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_DQS),
		   cqhci_readl(cq_host, CQHCI_DPT));
	CQHCI_DUMP("Task clr:  0x%08x | SSC1:     0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TCLR),
		   cqhci_readl(cq_host, CQHCI_SSC1));
	CQHCI_DUMP("SSC2:      0x%08x | DCMD rsp: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_SSC2),
		   cqhci_readl(cq_host, CQHCI_CRDCT));
	CQHCI_DUMP("RED mask:  0x%08x | TERRI:    0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_RMEM),
		   cqhci_readl(cq_host, CQHCI_TERRI));
	CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CRI),
		   cqhci_readl(cq_host, CQHCI_CRA));

	if (cq_host->ops->dumpregs)
		cq_host->ops->dumpregs(mmc);
	else
		CQHCI_DUMP(": ===========================================\n");
}
/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|          .
 *      .                .
 *  no. of slots      max-segs
 *      .           |----------|
 * |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
	int i = 0;

	/* task descriptor can be 64/128 bit irrespective of arch */
	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
			     CQHCI_TASK_DESC_SZ, CQHCI_CFG);
		cq_host->task_desc_len = 16;
	} else {
		cq_host->task_desc_len = 8;
	}

	/*
	 * 96 bits length of transfer desc instead of 128 bits which means
	 * ADMA would expect next valid descriptor at the 96th bit
	 * or 128th bit
	 */
	if (cq_host->dma64) {
		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
			cq_host->trans_desc_len = 12;
		else
			cq_host->trans_desc_len = 16;
		cq_host->link_desc_len = 16;
	} else {
		cq_host->trans_desc_len = 8;
		cq_host->link_desc_len = 8;
	}

	/* total size of a slot: 1 task & 1 transfer (link) */
	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

	cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
		cq_host->mmc->cqe_qdepth;

	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
		 cq_host->slot_sz);

	/*
	 * allocate a dma-mapped chunk of memory for the descriptors
	 * allocate a dma-mapped chunk of memory for link descriptors
	 * setup each link-desc memory offset per slot-number to
	 * the descriptor table.
	 */
	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						 cq_host->desc_size,
						 &cq_host->desc_dma_base,
						 GFP_KERNEL);
	if (!cq_host->desc_base)
		return -ENOMEM;

	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						       cq_host->data_size,
						       &cq_host->trans_desc_dma_base,
						       GFP_KERNEL);
	if (!cq_host->trans_desc_base) {
		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
				   cq_host->desc_base,
				   cq_host->desc_dma_base);
		cq_host->desc_base = NULL;
		cq_host->desc_dma_base = 0;
		return -ENOMEM;
	}

	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
		 (unsigned long long)cq_host->desc_dma_base,
		 (unsigned long long)cq_host->trans_desc_dma_base);

	for (; i < (cq_host->num_slots); i++)
		setup_trans_desc(cq_host, i);

	return 0;
}
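/*
 * Illustrative sizing example (assumed values, not mandated by the spec):
 * with 64-bit DMA and 128-bit task descriptors, slot_sz = 16 + 16 = 32
 * bytes, so a 32-slot TDL occupies 32 * 32 = 1024 bytes.  With
 * trans_desc_len = 16, max_segs = 128 and cqe_qdepth = 31 (one slot
 * reserved for DCMD), the transfer-descriptor pool above works out to
 * 16 * 128 * 31 = 63488 bytes.
 */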
static void __cqhci_enable(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

	/* Configuration must not be changed while enabled */
	if (cqcfg & CQHCI_ENABLE) {
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
	}

	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		cqcfg |= CQHCI_DCMD;

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
		cqcfg |= CQHCI_TASK_DESC_SZ;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBA);
	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBAU);

	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

	cqhci_set_irqs(cq_host, 0);

	cqcfg |= CQHCI_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	mmc->cqe_on = true;

	if (cq_host->ops->enable)
		cq_host->ops->enable(mmc);

	/* Ensure all writes are done before interrupts are enabled */
	wmb();

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	cq_host->activated = true;
}
static void __cqhci_disable(struct cqhci_host *cq_host)
{
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cq_host->mmc->cqe_on = false;

	cq_host->activated = false;
}
int cqhci_deactivate(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (cq_host->enabled && cq_host->activated)
		__cqhci_disable(cq_host);

	return 0;
}
EXPORT_SYMBOL(cqhci_deactivate);

int cqhci_resume(struct mmc_host *mmc)
{
	/* Re-enable is done upon first request */
	return 0;
}
EXPORT_SYMBOL(cqhci_resume);
static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int err;

	if (!card->ext_csd.cmdq_en)
		return -EINVAL;

	if (cq_host->enabled)
		return 0;

	cq_host->rca = card->rca;

	err = cqhci_host_alloc_tdl(cq_host);
	if (err) {
		pr_err("%s: Failed to enable CQE, error %d\n",
		       mmc_hostname(mmc), err);
		return err;
	}

	__cqhci_enable(cq_host);

	cq_host->enabled = true;

#ifdef DEBUG
	cqhci_dumpregs(cq_host);
#endif
	return 0;
}
/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL);
}

static void cqhci_off(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 reg;
	int err;

	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
		return;

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, false);

	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
	if (err < 0)
		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
	else
		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

	if (cq_host->ops->post_disable)
		cq_host->ops->post_disable(mmc);

	mmc->cqe_on = false;
}
static void cqhci_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->enabled)
		return;

	cqhci_off(mmc);

	__cqhci_disable(cq_host);

	dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
			   cq_host->trans_desc_base,
			   cq_host->trans_desc_dma_base);

	dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
			   cq_host->desc_base,
			   cq_host->desc_dma_base);

	cq_host->trans_desc_base = NULL;
	cq_host->desc_base = NULL;

	cq_host->enabled = false;
}
static void cqhci_prep_task_desc(struct mmc_request *mrq,
				 u64 *data, bool intr)
{
	u32 req_flags = mrq->data->flags;

	*data = CQHCI_VALID(1) |
		CQHCI_END(1) |
		CQHCI_INT(intr) |
		CQHCI_ACT(0x5) |
		CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
		CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
		CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
		CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
		CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
		CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
		CQHCI_BLK_COUNT(mrq->data->blocks) |
		CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

	pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
		 mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
}
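/*
 * Illustrative example (hypothetical request; the field values follow from
 * the flag handling above): a 4 KiB read queued on tag 2 with 512-byte
 * blocks sets CQHCI_DATA_DIR(1) because MMC_DATA_READ is set,
 * CQHCI_BLK_COUNT(8) and CQHCI_BLK_ADDR() to the request's block address,
 * while the remaining flag-driven fields (QBAR, priority, reliable write,
 * forced programming) stay 0 unless the block layer set the matching
 * MMC_DATA_* flag.
 */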
static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
	int sg_count;
	struct mmc_data *data = mrq->data;

	if (!data)
		return -EINVAL;

	sg_count = dma_map_sg(mmc_dev(host), data->sg,
			      data->sg_len,
			      (data->flags & MMC_DATA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!sg_count) {
		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
		return -ENOMEM;
	}

	return sg_count;
}
static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
				bool dma64)
{
	__le32 *attr = (__le32 __force *)desc;

	*attr = (CQHCI_VALID(1) |
		 CQHCI_END(end ? 1 : 0) |
		 CQHCI_INT(0) |
		 CQHCI_ACT(0x4) |
		 CQHCI_DAT_LENGTH(len));

	if (dma64) {
		__le64 *dataddr = (__le64 __force *)(desc + 4);

		dataddr[0] = cpu_to_le64(addr);
	} else {
		__le32 *dataddr = (__le32 __force *)(desc + 4);

		dataddr[0] = cpu_to_le32(addr);
	}
}
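/*
 * Layout note (derived from the code above; sizes are configuration
 * dependent): a transfer descriptor is a 32-bit attribute word
 * (valid/end/act/length) followed at byte offset 4 by either a 64-bit or a
 * 32-bit DMA address.  With CQHCI_QUIRK_SHORT_TXFR_DESC_SZ the descriptor
 * is 12 bytes, so the next descriptor starts immediately after the 64-bit
 * address rather than on a 16-byte boundary.
 */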
static int cqhci_prep_tran_desc(struct mmc_request *mrq,
				struct cqhci_host *cq_host, int tag)
{
	struct mmc_data *data = mrq->data;
	int i, sg_count, len;
	bool end = false;
	bool dma64 = cq_host->dma64;
	dma_addr_t addr;
	u8 *desc;
	struct scatterlist *sg;

	sg_count = cqhci_dma_map(mrq->host, mrq);
	if (sg_count < 0) {
		pr_err("%s: %s: unable to map sg lists, %d\n",
		       mmc_hostname(mrq->host), __func__, sg_count);
		return sg_count;
	}

	desc = get_trans_desc(cq_host, tag);

	for_each_sg(data->sg, sg, sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((i+1) == sg_count)
			end = true;
		cqhci_set_tran_desc(desc, addr, len, end, dma64);
		desc += cq_host->trans_desc_len;
	}

	return 0;
}
static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
				 struct mmc_request *mrq)
{
	u64 *task_desc = NULL;
	u64 data = 0;
	u8 resp_type;
	u8 *desc;
	__le64 *dataddr;
	struct cqhci_host *cq_host = mmc->cqe_private;
	u8 timing;

	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
		resp_type = 0x0;
		timing = 0x1;
	} else {
		if (mrq->cmd->flags & MMC_RSP_R1B) {
			resp_type = 0x3;
			timing = 0x0;
		} else {
			resp_type = 0x2;
			timing = 0x1;
		}
	}

	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
	memset(task_desc, 0, cq_host->task_desc_len);
	data |= (CQHCI_VALID(1) |
		 CQHCI_END(1) |
		 CQHCI_INT(1) |
		 CQHCI_QBAR(1) |
		 CQHCI_ACT(0x5) |
		 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
		 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
	if (cq_host->ops->update_dcmd_desc)
		cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
	*task_desc |= data;
	desc = (u8 *)task_desc;
	pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
		 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
	dataddr = (__le64 __force *)(desc + 4);
	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
}
static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (data) {
		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_READ) ?
			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static inline int cqhci_tag(struct mmc_request *mrq)
{
	return mrq->cmd ? DCMD_SLOT : mrq->tag;
}
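/*
 * Example (values taken from the definitions above): a data request with
 * mrq->tag == 5 and no mrq->cmd uses slot 5, while a DCMD request
 * (mrq->cmd set, no data) always uses DCMD_SLOT, i.e. slot 31.
 */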
static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int err = 0;
	u64 data = 0;
	u64 *task_desc = NULL;
	int tag = cqhci_tag(mrq);
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;

	if (!cq_host->enabled) {
		pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
		return -EINVAL;
	}

	/* First request after resume has to re-enable */
	if (!cq_host->activated)
		__cqhci_enable(cq_host);

	if (!mmc->cqe_on) {
		if (cq_host->ops->pre_enable)
			cq_host->ops->pre_enable(mmc);

		cqhci_writel(cq_host, 0, CQHCI_CTL);
		mmc->cqe_on = true;
		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
			pr_err("%s: cqhci: CQE failed to exit halt state\n",
			       mmc_hostname(mmc));
		}
		if (cq_host->ops->enable)
			cq_host->ops->enable(mmc);
	}

	if (mrq->data) {
		task_desc = (__le64 __force *)get_desc(cq_host, tag);
		cqhci_prep_task_desc(mrq, &data, 1);
		*task_desc = cpu_to_le64(data);
		err = cqhci_prep_tran_desc(mrq, cq_host, tag);
		if (err) {
			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
			       mmc_hostname(mmc), err);
			return err;
		}
	} else {
		cqhci_prep_dcmd_desc(mmc, mrq);
	}

	spin_lock_irqsave(&cq_host->lock, flags);

	if (cq_host->recovery_halt) {
		err = -EBUSY;
		goto out_unlock;
	}

	cq_host->slot[tag].mrq = mrq;
	cq_host->slot[tag].flags = 0;

	cq_host->qcnt += 1;
	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
			 mmc_hostname(mmc), tag);
out_unlock:
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (err)
		cqhci_post_req(mmc, mrq);

	return err;
}
static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
				  bool notify)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->recovery_halt) {
		cq_host->recovery_halt = true;
		pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
		wake_up(&cq_host->wait_queue);
		if (notify && mrq->recovery_notifier)
			mrq->recovery_notifier(mrq);
	}
}
static unsigned int cqhci_error_flags(int error1, int error2)
{
	int error = error1 ? error1 : error2;

	switch (error) {
	case -EILSEQ:
		return CQHCI_HOST_CRC;
	case -ETIMEDOUT:
		return CQHCI_HOST_TIMEOUT;
	default:
		return CQHCI_HOST_OTHER;
	}
}
static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
			    int data_error)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot;
	u32 terri;
	int tag;

	spin_lock(&cq_host->lock);

	terri = cqhci_readl(cq_host, CQHCI_TERRI);

	pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
		 mmc_hostname(mmc), status, cmd_error, data_error, terri);

	/* Forget about errors when recovery has already been triggered */
	if (cq_host->recovery_halt)
		goto out_unlock;

	if (!cq_host->qcnt) {
		WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
			  mmc_hostname(mmc), status, cmd_error, data_error,
			  terri);
		goto out_unlock;
	}

	if (CQHCI_TERRI_C_VALID(terri)) {
		tag = CQHCI_TERRI_C_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(cmd_error, data_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (CQHCI_TERRI_D_VALID(terri)) {
		tag = CQHCI_TERRI_D_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (!cq_host->recovery_halt) {
		/*
		 * The only way to guarantee forward progress is to mark at
		 * least one task in error, so if none is indicated, pick one.
		 */
		for (tag = 0; tag < NUM_SLOTS; tag++) {
			slot = &cq_host->slot[tag];
			if (!slot->mrq)
				continue;
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
			break;
		}
	}

out_unlock:
	spin_unlock(&cq_host->lock);
}
static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq) {
		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
			  mmc_hostname(mmc), tag);
		return;
	}

	/* No completions allowed during recovery */
	if (cq_host->recovery_halt) {
		slot->flags |= CQHCI_COMPLETED;
		return;
	}

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		if (data->error)
			data->bytes_xfered = 0;
		else
			data->bytes_xfered = data->blksz * data->blocks;
	}

	mmc_cqe_request_done(mmc, mrq);
}
irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
		      int data_error)
{
	u32 status;
	unsigned long tag = 0, comp_status;
	struct cqhci_host *cq_host = mmc->cqe_private;

	status = cqhci_readl(cq_host, CQHCI_IS);
	cqhci_writel(cq_host, status, CQHCI_IS);

	pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

	if ((status & CQHCI_IS_RED) || cmd_error || data_error)
		cqhci_error_irq(mmc, status, cmd_error, data_error);

	if (status & CQHCI_IS_TCC) {
		/* read TCN and complete the request */
		comp_status = cqhci_readl(cq_host, CQHCI_TCN);
		cqhci_writel(cq_host, comp_status, CQHCI_TCN);
		pr_debug("%s: cqhci: TCN: 0x%08lx\n",
			 mmc_hostname(mmc), comp_status);

		spin_lock(&cq_host->lock);

		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
			/* complete the corresponding mrq */
			pr_debug("%s: cqhci: completing tag %lu\n",
				 mmc_hostname(mmc), tag);
			cqhci_finish_mrq(mmc, tag);
		}

		if (cq_host->waiting_for_idle && !cq_host->qcnt) {
			cq_host->waiting_for_idle = false;
			wake_up(&cq_host->wait_queue);
		}

		spin_unlock(&cq_host->lock);
	}

	if (status & CQHCI_IS_TCL)
		wake_up(&cq_host->wait_queue);

	if (status & CQHCI_IS_HAC)
		wake_up(&cq_host->wait_queue);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);
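/*
 * Usage sketch (illustrative only; based on how existing SDHCI glue drivers
 * wire this up — the sdhci_* names below are assumptions about the caller,
 * not part of this file): the host driver's interrupt hook decodes
 * controller errors and then forwards the interrupt here, e.g.
 *
 *	static u32 my_cqhci_irq(struct sdhci_host *host, u32 intmask)
 *	{
 *		int cmd_error = 0, data_error = 0;
 *
 *		if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
 *			return intmask;
 *		cqhci_irq(host->mmc, intmask, cmd_error, data_error);
 *		return 0;
 *	}
 */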
static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
	unsigned long flags;
	bool is_idle;

	spin_lock_irqsave(&cq_host->lock, flags);
	is_idle = !cq_host->qcnt || cq_host->recovery_halt;
	*ret = cq_host->recovery_halt ? -EBUSY : 0;
	cq_host->waiting_for_idle = !is_idle;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int ret;

	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

	return ret;
}
static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool *recovery_needed)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int tag = cqhci_tag(mrq);
	struct cqhci_slot *slot = &cq_host->slot[tag];
	unsigned long flags;
	bool timed_out;

	spin_lock_irqsave(&cq_host->lock, flags);
	timed_out = slot->mrq == mrq;
	if (timed_out) {
		slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
		cqhci_recovery_needed(mmc, mrq, false);
		*recovery_needed = cq_host->recovery_halt;
	}
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (timed_out) {
		pr_err("%s: cqhci: timeout for tag %d\n",
		       mmc_hostname(mmc), tag);
		cqhci_dumpregs(cq_host);
	}

	return timed_out;
}
static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_CLEAR_ALL_TASKS;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_tasks_cleared(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to clear tasks\n",
			 mmc_hostname(mmc));

	return ret;
}
static bool cqhci_halted(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	if (cqhci_halted(cq_host))
		return true;

	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_HALT;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_halted(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

	return ret;
}
/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), so we set the timeout based on a
 * generous command timeout.
 */
#define CQHCI_START_HALT_TIMEOUT	5

static void cqhci_recovery_start(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, true);

	mmc->cqe_on = false;
}
static int cqhci_error_from_flags(unsigned int flags)
{
	if (!flags)
		return 0;

	/* CRC errors might indicate re-tuning so prefer to report that */
	if (flags & CQHCI_HOST_CRC)
		return -EILSEQ;

	if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
		return -ETIMEDOUT;

	return -EIO;
}
static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq)
		return;

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		data->bytes_xfered = 0;
		data->error = cqhci_error_from_flags(slot->flags);
	} else {
		mrq->cmd->error = cqhci_error_from_flags(slot->flags);
	}

	mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
	int i;

	for (i = 0; i < cq_host->num_slots; i++)
		cqhci_recover_mrq(cq_host, i);
}
/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT	20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT		20

static void cqhci_recovery_finish(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;
	u32 cqcfg;
	bool ok;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
		ok = false;

	/*
	 * The specification contradicts itself, by saying that tasks cannot be
	 * cleared if CQHCI does not halt, but if CQHCI does not halt, it should
	 * be disabled/re-enabled, but not to disable before clearing tasks.
	 * Have a go anyway.
	 */
	if (!ok) {
		pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
		cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		cqcfg |= CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		/* Be sure that there are no tasks */
		ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
		if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
			ok = false;
		WARN_ON(!ok);
	}

	cqhci_recover_mrqs(cq_host);

	WARN_ON(cq_host->qcnt);

	spin_lock_irqsave(&cq_host->lock, flags);

	cq_host->recovery_halt = false;
	mmc->cqe_on = false;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	/* Ensure all writes are done before interrupts are re-enabled */
	wmb();

	cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}
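/*
 * Recovery flow (summary of the code above): cqhci_recovery_start() halts
 * the controller and lets the glue driver quiesce, the mmc core then
 * recovers the card, and cqhci_recovery_finish() halts again, clears all
 * tasks (disabling and re-enabling CQE if that fails), completes every
 * outstanding request with an error derived from its slot flags, and
 * re-arms the interrupts.
 */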
static const struct mmc_cqe_ops cqhci_cqe_ops = {
	.cqe_enable = cqhci_enable,
	.cqe_disable = cqhci_disable,
	.cqe_request = cqhci_request,
	.cqe_post_req = cqhci_post_req,
	.cqe_off = cqhci_off,
	.cqe_wait_for_idle = cqhci_wait_for_idle,
	.cqe_timeout = cqhci_timeout,
	.cqe_recovery_start = cqhci_recovery_start,
	.cqe_recovery_finish = cqhci_recovery_finish,
};
*cqhci_pltfm_init(struct platform_device
*pdev
)
1080 struct cqhci_host
*cq_host
;
1081 struct resource
*cqhci_memres
= NULL
;
1083 /* check and setup CMDQ interface */
1084 cqhci_memres
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
,
1086 if (!cqhci_memres
) {
1087 dev_dbg(&pdev
->dev
, "CMDQ not supported\n");
1088 return ERR_PTR(-EINVAL
);
1091 cq_host
= devm_kzalloc(&pdev
->dev
, sizeof(*cq_host
), GFP_KERNEL
);
1093 return ERR_PTR(-ENOMEM
);
1094 cq_host
->mmio
= devm_ioremap(&pdev
->dev
,
1095 cqhci_memres
->start
,
1096 resource_size(cqhci_memres
));
1097 if (!cq_host
->mmio
) {
1098 dev_err(&pdev
->dev
, "failed to remap cqhci regs\n");
1099 return ERR_PTR(-EBUSY
);
1101 dev_dbg(&pdev
->dev
, "CMDQ ioremap: done\n");
1105 EXPORT_SYMBOL(cqhci_pltfm_init
);
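/*
 * Usage note (descriptive, based on the exported API in this file): a
 * platform glue driver typically calls cqhci_pltfm_init() from its probe to
 * map the CQHCI register region, then cqhci_init() to hook the CQE ops into
 * its mmc_host, and forwards interrupts to cqhci_irq() while CQE is active.
 */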
static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
	u32 ver = cqhci_readl(cq_host, CQHCI_VER);

	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}
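/*
 * Example (assuming the usual encoding of the CQHCI_VER register, where the
 * minor version is two 4-bit digits): a CQHCI_VER value of 0x0510 reports
 * as version 5.10 in the message printed by cqhci_init() below.
 */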
int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
	       bool dma64)
{
	int err;

	cq_host->dma64 = dma64;
	cq_host->mmc = mmc;
	cq_host->mmc->cqe_private = cq_host;

	cq_host->num_slots = NUM_SLOTS;
	cq_host->dcmd_slot = DCMD_SLOT;

	mmc->cqe_ops = &cqhci_cqe_ops;

	mmc->cqe_qdepth = NUM_SLOTS;
	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		mmc->cqe_qdepth -= 1;

	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
				     sizeof(*cq_host->slot), GFP_KERNEL);
	if (!cq_host->slot) {
		err = -ENOMEM;
		goto out_err;
	}

	spin_lock_init(&cq_host->lock);

	init_completion(&cq_host->halt_comp);
	init_waitqueue_head(&cq_host->wait_queue);

	pr_info("%s: CQHCI version %u.%02u\n",
		mmc_hostname(mmc), cqhci_ver_major(cq_host),
		cqhci_ver_minor(cq_host));

	return 0;

out_err:
	pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
	       mmc_hostname(mmc), cqhci_ver_major(cq_host),
	       cqhci_ver_minor(cq_host), err);
	return err;
}
EXPORT_SYMBOL(cqhci_init);
MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");