// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
	struct mmc_request *mrq;
	unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
#define CQHCI_COMPLETED		BIT(1)
#define CQHCI_HOST_CRC		BIT(2)
#define CQHCI_HOST_TIMEOUT	BIT(3)
#define CQHCI_HOST_OTHER	BIT(4)
};

static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *desc = get_desc(cq_host, tag);

	return desc + cq_host->task_desc_len;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_dma_base +
		(cq_host->mmc->max_segs * tag *
		 cq_host->trans_desc_len);
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_base +
		(cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
}
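
/*
 * Each slot's link descriptor is set up once to point at that tag's region
 * of the transfer descriptor table. The DCMD slot instead gets an
 * end-of-chain marker, since direct commands transfer no data.
 */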

static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *link_temp;
	dma_addr_t trans_temp;

	link_temp = get_link_desc(cq_host, tag);
	trans_temp = get_trans_desc_dma(cq_host, tag);

	memset(link_temp, 0, cq_host->link_desc_len);
	if (cq_host->link_desc_len > 8)
		*(link_temp + 8) = 0;

	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
		return;
	}

	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

	if (cq_host->dma64) {
		__le64 *data_addr = (__le64 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le64(trans_temp);
	} else {
		__le32 *data_addr = (__le32 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le32(trans_temp);
	}
}

static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
	/* Gate both interrupt status and interrupt signalling on @set */
	cqhci_writel(cq_host, set, CQHCI_ISTE);
	cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;

	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

	CQHCI_DUMP("Caps:      0x%08x | Version:  0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CAP),
		   cqhci_readl(cq_host, CQHCI_VER));
	CQHCI_DUMP("Config:    0x%08x | Control:  0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CFG),
		   cqhci_readl(cq_host, CQHCI_CTL));
	CQHCI_DUMP("Int stat:  0x%08x | Int enab: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_IS),
		   cqhci_readl(cq_host, CQHCI_ISTE));
	CQHCI_DUMP("Int sig:   0x%08x | Int Coal: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_ISGE),
		   cqhci_readl(cq_host, CQHCI_IC));
	CQHCI_DUMP("TDL base:  0x%08x | TDL up32: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDLBA),
		   cqhci_readl(cq_host, CQHCI_TDLBAU));
	CQHCI_DUMP("Doorbell:  0x%08x | TCN:      0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDBR),
		   cqhci_readl(cq_host, CQHCI_TCN));
	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_DQS),
		   cqhci_readl(cq_host, CQHCI_DPT));
	CQHCI_DUMP("Task clr:  0x%08x | SSC1:     0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TCLR),
		   cqhci_readl(cq_host, CQHCI_SSC1));
	CQHCI_DUMP("SSC2:      0x%08x | DCMD rsp: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_SSC2),
		   cqhci_readl(cq_host, CQHCI_CRDCT));
	CQHCI_DUMP("RED mask:  0x%08x | TERRI:    0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_RMEM),
		   cqhci_readl(cq_host, CQHCI_TERRI));
	CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CRI),
		   cqhci_readl(cq_host, CQHCI_CRA));

	if (cq_host->ops->dumpregs)
		cq_host->ops->dumpregs(mmc);
	else
		CQHCI_DUMP(": ===========================================\n");
}

/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 *
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|          .
 *      .                .
 *  no. of slots      max-segs
 *      .                .
 * |----------|          .
 *
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
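
/*
 * For illustration only (the actual numbers depend on caps and quirks):
 * with 128-bit task descriptors and 64-bit DMA, slot_sz = 16 + 16 = 32
 * bytes, so 32 slots need a 1 KiB descriptor table; with max_segs = 128
 * and cqe_qdepth = 32, the transfer descriptor table takes
 * 16 * 128 * 32 = 64 KiB.
 */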

static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
	int i = 0;

	/* task descriptor can be 64/128 bit irrespective of arch */
	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
			     CQHCI_TASK_DESC_SZ, CQHCI_CFG);
		cq_host->task_desc_len = 16;
	} else {
		cq_host->task_desc_len = 8;
	}

	/*
	 * 96 bits length of transfer desc instead of 128 bits which means
	 * ADMA would expect next valid descriptor at the 96th bit
	 * or 128th bit
	 */
	if (cq_host->dma64) {
		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
			cq_host->trans_desc_len = 12;
		else
			cq_host->trans_desc_len = 16;
		cq_host->link_desc_len = 16;
	} else {
		cq_host->trans_desc_len = 8;
		cq_host->link_desc_len = 8;
	}

	/* total size of a slot: 1 task & 1 transfer (link) */
	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

	cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
		cq_host->mmc->cqe_qdepth;

	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_size,
		 cq_host->data_size, cq_host->slot_sz);

	/*
	 * allocate a dma-mapped chunk of memory for the descriptors
	 * allocate a dma-mapped chunk of memory for link descriptors
	 * setup each link-desc memory offset per slot-number to
	 * the descriptor table.
	 */
	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						 cq_host->desc_size,
						 &cq_host->desc_dma_base,
						 GFP_KERNEL);
	if (!cq_host->desc_base)
		return -ENOMEM;

	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						       cq_host->data_size,
						       &cq_host->trans_desc_dma_base,
						       GFP_KERNEL);
	if (!cq_host->trans_desc_base) {
		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
				   cq_host->desc_base,
				   cq_host->desc_dma_base);
		cq_host->desc_base = NULL;
		cq_host->desc_dma_base = 0;
		return -ENOMEM;
	}

	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
		 (unsigned long long)cq_host->desc_dma_base,
		 (unsigned long long)cq_host->trans_desc_dma_base);

	for (; i < (cq_host->num_slots); i++)
		setup_trans_desc(cq_host, i);

	return 0;
}

static void __cqhci_enable(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

	/* Configuration must not be changed while enabled */
	if (cqcfg & CQHCI_ENABLE) {
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
	}

	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		cqcfg |= CQHCI_DCMD;

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
		cqcfg |= CQHCI_TASK_DESC_SZ;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBA);
	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBAU);

	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

	cqhci_set_irqs(cq_host, 0);

	cqcfg |= CQHCI_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	mmc->cqe_on = true;

	if (cq_host->ops->enable)
		cq_host->ops->enable(mmc);

	/* Ensure all writes are done before interrupts are enabled */
	wmb();

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	cq_host->activated = true;
}

static void __cqhci_disable(struct cqhci_host *cq_host)
{
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cq_host->mmc->cqe_on = false;

	cq_host->activated = false;
}

int cqhci_suspend(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (cq_host->enabled)
		__cqhci_disable(cq_host);

	return 0;
}
EXPORT_SYMBOL(cqhci_suspend);

int cqhci_resume(struct mmc_host *mmc)
{
	/* Re-enable is done upon first request */
	return 0;
}
EXPORT_SYMBOL(cqhci_resume);

static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int err;

	if (cq_host->enabled)
		return 0;

	cq_host->rca = card->rca;

	err = cqhci_host_alloc_tdl(cq_host);
	if (err)
		return err;

	__cqhci_enable(cq_host);

	cq_host->enabled = true;

#ifdef DEBUG
	cqhci_dumpregs(cq_host);
#endif
	return 0;
}

/* CQHCI is idle and should halt immediately, so set a small timeout (in us) */
#define CQHCI_OFF_TIMEOUT 100

static void cqhci_off(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	ktime_t timeout;
	bool timed_out;
	u32 reg;

	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
		return;

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, false);

	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

	timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
	while (1) {
		timed_out = ktime_compare(ktime_get(), timeout) > 0;
		reg = cqhci_readl(cq_host, CQHCI_CTL);
		if ((reg & CQHCI_HALT) || timed_out)
			break;
	}

	if (timed_out)
		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
	else
		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

	mmc->cqe_on = false;
}

static void cqhci_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->enabled)
		return;

	cqhci_off(mmc);

	__cqhci_disable(cq_host);

	dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
			   cq_host->trans_desc_base,
			   cq_host->trans_desc_dma_base);

	dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
			   cq_host->desc_base,
			   cq_host->desc_dma_base);

	cq_host->trans_desc_base = NULL;
	cq_host->desc_base = NULL;

	cq_host->enabled = false;
}
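
/*
 * A data task descriptor is a single 64-bit word that packs the queue
 * attributes (VALID, END, INT, ACT = 0x5) together with the request flags,
 * block count and block address, via the CQHCI_* field macros in cqhci.h.
 */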
static void cqhci_prep_task_desc(struct mmc_request *mrq,
				 u64 *data, bool intr)
{
	u32 req_flags = mrq->data->flags;

	*data = CQHCI_VALID(1) |
		CQHCI_END(1) |
		CQHCI_INT(intr) |
		CQHCI_ACT(0x5) |
		CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
		CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
		CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
		CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
		CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
		CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
		CQHCI_BLK_COUNT(mrq->data->blocks) |
		CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

	pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
		 mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
}

static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
	int sg_count;
	struct mmc_data *data = mrq->data;

	if (!data)
		return -EINVAL;

	sg_count = dma_map_sg(mmc_dev(host), data->sg,
			      data->sg_len,
			      (data->flags & MMC_DATA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!sg_count) {
		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
		return -ENOMEM;
	}

	return sg_count;
}

static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
				bool dma64)
{
	__le32 *attr = (__le32 __force *)desc;

	*attr = (CQHCI_VALID(1) |
		 CQHCI_END(end ? 1 : 0) |
		 CQHCI_INT(0) |
		 CQHCI_ACT(0x4) |
		 CQHCI_DAT_LENGTH(len));

	if (dma64) {
		__le64 *dataddr = (__le64 __force *)(desc + 4);

		dataddr[0] = cpu_to_le64(addr);
	} else {
		__le32 *dataddr = (__le32 __force *)(desc + 4);

		dataddr[0] = cpu_to_le32(addr);
	}
}

static int cqhci_prep_tran_desc(struct mmc_request *mrq,
				struct cqhci_host *cq_host, int tag)
{
	struct mmc_data *data = mrq->data;
	int i, sg_count, len;
	bool end = false;
	bool dma64 = cq_host->dma64;
	dma_addr_t addr;
	u8 *desc;
	struct scatterlist *sg;

	sg_count = cqhci_dma_map(mrq->host, mrq);
	if (sg_count < 0) {
		pr_err("%s: %s: unable to map sg lists, %d\n",
		       mmc_hostname(mrq->host), __func__, sg_count);
		return sg_count;
	}

	desc = get_trans_desc(cq_host, tag);

	for_each_sg(data->sg, sg, sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((i+1) == sg_count)
			end = true;
		cqhci_set_tran_desc(desc, addr, len, end, dma64);
		desc += cq_host->trans_desc_len;
	}

	return 0;
}

static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
				 struct mmc_request *mrq)
{
	u64 *task_desc = NULL;
	u64 data = 0;
	u8 resp_type;
	u8 *desc;
	__le64 *dataddr;
	struct cqhci_host *cq_host = mmc->cqe_private;
	u8 timing;

	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
		resp_type = 0x0;
		timing = 0x1;
	} else {
		if (mrq->cmd->flags & MMC_RSP_R1B) {
			resp_type = 0x3;
			timing = 0x0;
		} else {
			resp_type = 0x2;
			timing = 0x1;
		}
	}

	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
	memset(task_desc, 0, cq_host->task_desc_len);
	data |= (CQHCI_VALID(1) |
		 CQHCI_END(1) |
		 CQHCI_INT(1) |
		 CQHCI_QBAR(1) |
		 CQHCI_ACT(0x5) |
		 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
		 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
	if (cq_host->ops->update_dcmd_desc)
		cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
	*task_desc |= data;
	desc = (u8 *)task_desc;
	pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
		 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
	dataddr = (__le64 __force *)(desc + 4);
	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
}

static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (data) {
		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_READ) ?
			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}
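
/*
 * CQE data requests carry no struct mmc_command, so a non-NULL mrq->cmd
 * identifies a DCMD, which always occupies the dedicated DCMD slot.
 */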
static inline int cqhci_tag(struct mmc_request *mrq)
{
	return mrq->cmd ? DCMD_SLOT : mrq->tag;
}

static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int err = 0;
	u64 data = 0;
	u64 *task_desc = NULL;
	int tag = cqhci_tag(mrq);
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;

	if (!cq_host->enabled) {
		pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
		return -EINVAL;
	}

	/* First request after resume has to re-enable */
	if (!cq_host->activated)
		__cqhci_enable(cq_host);

	if (!mmc->cqe_on) {
		cqhci_writel(cq_host, 0, CQHCI_CTL);
		mmc->cqe_on = true;
		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
			pr_err("%s: cqhci: CQE failed to exit halt state\n",
			       mmc_hostname(mmc));
		}
		if (cq_host->ops->enable)
			cq_host->ops->enable(mmc);
	}

	if (mrq->data) {
		task_desc = (__le64 __force *)get_desc(cq_host, tag);
		cqhci_prep_task_desc(mrq, &data, 1);
		*task_desc = cpu_to_le64(data);
		err = cqhci_prep_tran_desc(mrq, cq_host, tag);
		if (err) {
			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
			       mmc_hostname(mmc), err);
			return err;
		}
	} else {
		cqhci_prep_dcmd_desc(mmc, mrq);
	}

	spin_lock_irqsave(&cq_host->lock, flags);

	if (cq_host->recovery_halt) {
		err = -EBUSY;
		goto out_unlock;
	}

	cq_host->slot[tag].mrq = mrq;
	cq_host->slot[tag].flags = 0;

	cq_host->qcnt += 1;

	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
			 mmc_hostname(mmc), tag);
out_unlock:
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (err)
		cqhci_post_req(mmc, mrq);

	return err;
}

static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
				  bool notify)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->recovery_halt) {
		cq_host->recovery_halt = true;
		pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
		wake_up(&cq_host->wait_queue);
		if (notify && mrq->recovery_notifier)
			mrq->recovery_notifier(mrq);
	}
}

static unsigned int cqhci_error_flags(int error1, int error2)
{
	int error = error1 ? error1 : error2;

	switch (error) {
	case -EILSEQ:
		return CQHCI_HOST_CRC;
	case -ETIMEDOUT:
		return CQHCI_HOST_TIMEOUT;
	default:
		return CQHCI_HOST_OTHER;
	}
}
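
/*
 * CQHCI_TERRI (task error information) identifies the task that was active
 * when a command or data error occurred. The handler marks the indicated
 * slot(s) and triggers recovery; if TERRI pinpoints nothing, one queued
 * task is marked anyway so that recovery can make forward progress.
 */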
static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
			    int data_error)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot;
	u32 terri;
	int tag;

	spin_lock(&cq_host->lock);

	terri = cqhci_readl(cq_host, CQHCI_TERRI);

	pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
		 mmc_hostname(mmc), status, cmd_error, data_error, terri);

	/* Forget about errors when recovery has already been triggered */
	if (cq_host->recovery_halt)
		goto out_unlock;

	if (!cq_host->qcnt) {
		WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
			  mmc_hostname(mmc), status, cmd_error, data_error,
			  terri);
		goto out_unlock;
	}

	if (CQHCI_TERRI_C_VALID(terri)) {
		tag = CQHCI_TERRI_C_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(cmd_error, data_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (CQHCI_TERRI_D_VALID(terri)) {
		tag = CQHCI_TERRI_D_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (!cq_host->recovery_halt) {
		/*
		 * The only way to guarantee forward progress is to mark at
		 * least one task in error, so if none is indicated, pick one.
		 */
		for (tag = 0; tag < NUM_SLOTS; tag++) {
			slot = &cq_host->slot[tag];
			if (slot->mrq) {
				slot->flags = cqhci_error_flags(data_error,
								cmd_error);
				cqhci_recovery_needed(mmc, slot->mrq, true);
				break;
			}
		}
	}

out_unlock:
	spin_unlock(&cq_host->lock);
}

static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq) {
		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
			  mmc_hostname(mmc), tag);
		return;
	}

	/* No completions allowed during recovery */
	if (cq_host->recovery_halt) {
		slot->flags |= CQHCI_COMPLETED;
		return;
	}

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		if (data->error)
			data->bytes_xfered = 0;
		else
			data->bytes_xfered = data->blksz * data->blocks;
	}

	mmc_cqe_request_done(mmc, mrq);
}

irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
		      int data_error)
{
	u32 status;
	unsigned long tag = 0, comp_status;
	struct cqhci_host *cq_host = mmc->cqe_private;

	status = cqhci_readl(cq_host, CQHCI_IS);
	cqhci_writel(cq_host, status, CQHCI_IS);

	pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

	if ((status & CQHCI_IS_RED) || cmd_error || data_error)
		cqhci_error_irq(mmc, status, cmd_error, data_error);

	if (status & CQHCI_IS_TCC) {
		/* read TCN and complete the request */
		comp_status = cqhci_readl(cq_host, CQHCI_TCN);
		cqhci_writel(cq_host, comp_status, CQHCI_TCN);
		pr_debug("%s: cqhci: TCN: 0x%08lx\n",
			 mmc_hostname(mmc), comp_status);

		spin_lock(&cq_host->lock);

		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
			/* complete the corresponding mrq */
			pr_debug("%s: cqhci: completing tag %lu\n",
				 mmc_hostname(mmc), tag);
			cqhci_finish_mrq(mmc, tag);
		}

		if (cq_host->waiting_for_idle && !cq_host->qcnt) {
			cq_host->waiting_for_idle = false;
			wake_up(&cq_host->wait_queue);
		}

		spin_unlock(&cq_host->lock);
	}

	if (status & CQHCI_IS_TCL)
		wake_up(&cq_host->wait_queue);

	if (status & CQHCI_IS_HAC)
		wake_up(&cq_host->wait_queue);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);
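
/*
 * A minimal sketch of how a host driver chains into cqhci_irq() from its
 * own interrupt handler (hypothetical glue; real drivers derive
 * cmd_error/data_error from their own status bits):
 *
 *	static irqreturn_t my_host_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		if (my_host_cqe_active(mmc))
 *			return cqhci_irq(mmc, my_host_read_status(mmc), 0, 0);
 *		return IRQ_NONE;
 *	}
 */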

static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
	unsigned long flags;
	bool is_idle;

	spin_lock_irqsave(&cq_host->lock, flags);
	is_idle = !cq_host->qcnt || cq_host->recovery_halt;
	*ret = cq_host->recovery_halt ? -EBUSY : 0;
	cq_host->waiting_for_idle = !is_idle;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int ret;

	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

	return ret;
}

static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool *recovery_needed)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int tag = cqhci_tag(mrq);
	struct cqhci_slot *slot = &cq_host->slot[tag];
	unsigned long flags;
	bool timed_out;

	spin_lock_irqsave(&cq_host->lock, flags);
	timed_out = slot->mrq == mrq;
	if (timed_out) {
		slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
		cqhci_recovery_needed(mmc, mrq, false);
		*recovery_needed = cq_host->recovery_halt;
	}
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (timed_out) {
		pr_err("%s: cqhci: timeout for tag %d\n",
		       mmc_hostname(mmc), tag);
		cqhci_dumpregs(cq_host);
	}

	return timed_out;
}

static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_CLEAR_ALL_TASKS;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_tasks_cleared(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to clear tasks\n",
			 mmc_hostname(mmc));

	return ret;
}

static bool cqhci_halted(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	if (cqhci_halted(cq_host))
		return true;

	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_HALT;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_halted(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

	return ret;
}

/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), so we set the timeout based on a
 * generous command timeout.
 */
#define CQHCI_START_HALT_TIMEOUT	5
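
/*
 * Note: the halt and task-clear timeouts are in milliseconds; cqhci_halt()
 * and cqhci_clear_all_tasks() convert them with msecs_to_jiffies().
 */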

static void cqhci_recovery_start(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, true);

	mmc->cqe_on = false;
}

static int cqhci_error_from_flags(unsigned int flags)
{
	if (!flags)
		return 0;

	/* CRC errors might indicate re-tuning so prefer to report that */
	if (flags & CQHCI_HOST_CRC)
		return -EILSEQ;

	if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
		return -ETIMEDOUT;

	return -EIO;
}

static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq)
		return;

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		data->bytes_xfered = 0;
		data->error = cqhci_error_from_flags(slot->flags);
	} else {
		mrq->cmd->error = cqhci_error_from_flags(slot->flags);
	}

	mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
	int i;

	for (i = 0; i < cq_host->num_slots; i++)
		cqhci_recover_mrq(cq_host, i);
}

/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT	20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT		20

static void cqhci_recovery_finish(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;
	u32 cqcfg;
	bool ok;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
		ok = false;

	/*
	 * The specification contradicts itself, by saying that tasks cannot be
	 * cleared if CQHCI does not halt, but if CQHCI does not halt, it should
	 * be disabled/re-enabled, but not to disable before clearing tasks.
	 * Have a go anyway.
	 */
	if (!ok) {
		pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
		cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		cqcfg |= CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		/* Be sure that there are no tasks */
		ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
		if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
			ok = false;
		WARN_ON(!ok);
	}

	cqhci_recover_mrqs(cq_host);

	WARN_ON(cq_host->qcnt);

	spin_lock_irqsave(&cq_host->lock, flags);
	cq_host->qcnt = 0;
	cq_host->recovery_halt = false;
	mmc->cqe_on = false;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	/* Ensure all writes are done before interrupts are re-enabled */
	wmb();

	cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}

static const struct mmc_cqe_ops cqhci_cqe_ops = {
	.cqe_enable = cqhci_enable,
	.cqe_disable = cqhci_disable,
	.cqe_request = cqhci_request,
	.cqe_post_req = cqhci_post_req,
	.cqe_off = cqhci_off,
	.cqe_wait_for_idle = cqhci_wait_for_idle,
	.cqe_timeout = cqhci_timeout,
	.cqe_recovery_start = cqhci_recovery_start,
	.cqe_recovery_finish = cqhci_recovery_finish,
};

struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
	struct cqhci_host *cq_host;
	struct resource *cqhci_memres = NULL;

	/* check and setup CMDQ interface */
	cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						    "cqhci_mem");
	if (!cqhci_memres) {
		dev_dbg(&pdev->dev, "CMDQ not supported\n");
		return ERR_PTR(-EINVAL);
	}

	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host)
		return ERR_PTR(-ENOMEM);
	cq_host->mmio = devm_ioremap(&pdev->dev,
				     cqhci_memres->start,
				     resource_size(cqhci_memres));
	if (!cq_host->mmio) {
		dev_err(&pdev->dev, "failed to remap cqhci regs\n");
		return ERR_PTR(-EBUSY);
	}
	dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

	return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);

static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
	u32 ver = cqhci_readl(cq_host, CQHCI_VER);

	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}

int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
	       bool dma64)
{
	int err;

	cq_host->dma64 = dma64;
	cq_host->mmc = mmc;
	cq_host->mmc->cqe_private = cq_host;

	cq_host->num_slots = NUM_SLOTS;
	cq_host->dcmd_slot = DCMD_SLOT;

	mmc->cqe_ops = &cqhci_cqe_ops;

	mmc->cqe_qdepth = NUM_SLOTS;
	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		mmc->cqe_qdepth -= 1;

	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
				     sizeof(*cq_host->slot), GFP_KERNEL);
	if (!cq_host->slot) {
		err = -ENOMEM;
		goto out_err;
	}

	spin_lock_init(&cq_host->lock);

	init_completion(&cq_host->halt_comp);
	init_waitqueue_head(&cq_host->wait_queue);

	pr_info("%s: CQHCI version %u.%02u\n",
		mmc_hostname(mmc), cqhci_ver_major(cq_host),
		cqhci_ver_minor(cq_host));

	return 0;

out_err:
	pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
	       mmc_hostname(mmc), cqhci_ver_major(cq_host),
	       cqhci_ver_minor(cq_host), err);
	return err;
}
EXPORT_SYMBOL(cqhci_init);
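
/*
 * A minimal sketch of the expected probe-time wiring for a platform host
 * driver (hypothetical glue; error handling mostly elided):
 *
 *	cq_host = cqhci_pltfm_init(pdev);
 *	if (IS_ERR(cq_host))
 *		return PTR_ERR(cq_host);
 *	cq_host->ops = &my_cqhci_host_ops;
 *	err = cqhci_init(cq_host, mmc, my_host_has_64bit_dma(mmc));
 *
 * With MMC_CAP2_CQE (and optionally MMC_CAP2_CQE_DCMD) set in mmc->caps2,
 * the mmc core then drives the cqhci_cqe_ops callbacks above.
 */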

MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");