/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
	struct mmc_request *mrq;
	unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
#define CQHCI_COMPLETED		BIT(1)
#define CQHCI_HOST_CRC		BIT(2)
#define CQHCI_HOST_TIMEOUT	BIT(3)
#define CQHCI_HOST_OTHER	BIT(4)
};
static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *desc = get_desc(cq_host, tag);

	return desc + cq_host->task_desc_len;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_dma_base +
		(cq_host->mmc->max_segs * tag *
		 cq_host->trans_desc_len);
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_base +
		(cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
}
static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *link_temp;
	dma_addr_t trans_temp;

	link_temp = get_link_desc(cq_host, tag);
	trans_temp = get_trans_desc_dma(cq_host, tag);

	memset(link_temp, 0, cq_host->link_desc_len);
	if (cq_host->link_desc_len > 8)
		*(link_temp + 8) = 0;

	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
		return;
	}

	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

	if (cq_host->dma64) {
		__le64 *data_addr = (__le64 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le64(trans_temp);
	} else {
		__le32 *data_addr = (__le32 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le32(trans_temp);
	}
}
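/*
 * Note on the layout produced above: each link descriptor starts with a 32-bit
 * attribute word (VALID/ACT/END) and carries, from byte offset 4, the DMA
 * address of that slot's transfer-descriptor list, stored as 32 or 64 bits
 * depending on cq_host->dma64. The DCMD slot gets an end-of-chain link
 * descriptor with no address.
 */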
static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
	cqhci_writel(cq_host, set, CQHCI_ISTE);
	cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)
static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;

	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

	CQHCI_DUMP("Caps:      0x%08x | Version:  0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CAP),
		   cqhci_readl(cq_host, CQHCI_VER));
	CQHCI_DUMP("Config:    0x%08x | Control:  0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CFG),
		   cqhci_readl(cq_host, CQHCI_CTL));
	CQHCI_DUMP("Int stat:  0x%08x | Int enab: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_IS),
		   cqhci_readl(cq_host, CQHCI_ISTE));
	CQHCI_DUMP("Int sig:   0x%08x | Int Coal: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_ISGE),
		   cqhci_readl(cq_host, CQHCI_IC));
	CQHCI_DUMP("TDL base:  0x%08x | TDL up32: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDLBA),
		   cqhci_readl(cq_host, CQHCI_TDLBAU));
	CQHCI_DUMP("Doorbell:  0x%08x | TCN:      0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDBR),
		   cqhci_readl(cq_host, CQHCI_TCN));
	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_DQS),
		   cqhci_readl(cq_host, CQHCI_DPT));
	CQHCI_DUMP("Task clr:  0x%08x | SSC1:     0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TCLR),
		   cqhci_readl(cq_host, CQHCI_SSC1));
	CQHCI_DUMP("SSC2:      0x%08x | DCMD rsp: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_SSC2),
		   cqhci_readl(cq_host, CQHCI_CRDCT));
	CQHCI_DUMP("RED mask:  0x%08x | TERRI:    0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_RMEM),
		   cqhci_readl(cq_host, CQHCI_TERRI));
	CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CRI),
		   cqhci_readl(cq_host, CQHCI_CRA));

	if (cq_host->ops->dumpregs)
		cq_host->ops->dumpregs(mmc);
	else
		CQHCI_DUMP(": ===========================================\n");
}
/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|          .
 *      .                .
 *  no. of slots      max-segs
 *      .           |----------|
 * |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
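/*
 * Worked size example (numbers derived from cqhci_host_alloc_tdl() below):
 * with CQHCI_TASK_DESC_SZ_128 set and 64-bit DMA, task_desc_len = 16 and
 * link_desc_len = 16, so slot_sz = 32 bytes and the task/link table is
 * 32 * num_slots bytes. The transfer descriptor table adds a further
 * trans_desc_len * max_segs bytes for each of cqe_qdepth entries.
 */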
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
	int i = 0;

	/* task descriptor can be 64/128 bit irrespective of arch */
	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
			     CQHCI_TASK_DESC_SZ, CQHCI_CFG);
		cq_host->task_desc_len = 16;
	} else {
		cq_host->task_desc_len = 8;
	}

	/*
	 * 96 bits length of transfer desc instead of 128 bits which means
	 * ADMA would expect next valid descriptor at the 96th bit
	 */
	if (cq_host->dma64) {
		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
			cq_host->trans_desc_len = 12;
		else
			cq_host->trans_desc_len = 16;
		cq_host->link_desc_len = 16;
	} else {
		cq_host->trans_desc_len = 8;
		cq_host->link_desc_len = 8;
	}

	/* total size of a slot: 1 task & 1 transfer (link) */
	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

	cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
		cq_host->mmc->cqe_qdepth;

	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_size,
		 cq_host->data_size, cq_host->slot_sz);

	/*
	 * allocate a dma-mapped chunk of memory for the descriptors
	 * allocate a dma-mapped chunk of memory for link descriptors
	 * setup each link-desc memory offset per slot-number to
	 * the descriptor table.
	 */
	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						 cq_host->desc_size,
						 &cq_host->desc_dma_base,
						 GFP_KERNEL);
	if (!cq_host->desc_base)
		return -ENOMEM;

	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						       cq_host->data_size,
						       &cq_host->trans_desc_dma_base,
						       GFP_KERNEL);
	if (!cq_host->trans_desc_base) {
		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
				   cq_host->desc_base,
				   cq_host->desc_dma_base);
		cq_host->desc_base = NULL;
		cq_host->desc_dma_base = 0;
		return -ENOMEM;
	}

	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_base,
		 cq_host->trans_desc_base,
		 (unsigned long long)cq_host->desc_dma_base,
		 (unsigned long long)cq_host->trans_desc_dma_base);

	for (; i < (cq_host->num_slots); i++)
		setup_trans_desc(cq_host, i);

	return 0;
}
static void __cqhci_enable(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

	/* Configuration must not be changed while enabled */
	if (cqcfg & CQHCI_ENABLE) {
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
	}

	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		cqcfg |= CQHCI_DCMD;

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
		cqcfg |= CQHCI_TASK_DESC_SZ;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBA);
	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBAU);

	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

	cqhci_set_irqs(cq_host, 0);

	cqcfg |= CQHCI_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	mmc->cqe_on = true;

	if (cq_host->ops->enable)
		cq_host->ops->enable(mmc);

	/* Ensure all writes are done before interrupts are enabled */
	wmb();

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	cq_host->activated = true;
}
static void __cqhci_disable(struct cqhci_host *cq_host)
{
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cq_host->mmc->cqe_on = false;

	cq_host->activated = false;
}

int cqhci_suspend(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (cq_host->enabled)
		__cqhci_disable(cq_host);

	return 0;
}
EXPORT_SYMBOL(cqhci_suspend);

int cqhci_resume(struct mmc_host *mmc)
{
	/* Re-enable is done upon first request */
	return 0;
}
EXPORT_SYMBOL(cqhci_resume);
static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int err;

	if (cq_host->enabled)
		return 0;

	cq_host->rca = card->rca;

	err = cqhci_host_alloc_tdl(cq_host);
	if (err)
		return err;

	__cqhci_enable(cq_host);

	cq_host->enabled = true;

#ifdef DEBUG
	cqhci_dumpregs(cq_host);
#endif
	return 0;
}
/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL);
}
static void cqhci_off(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 reg;
	int err;

	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
		return;

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, false);

	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
	if (err < 0)
		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
	else
		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

	mmc->cqe_on = false;
}
static void cqhci_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->enabled)
		return;

	cqhci_off(mmc);

	__cqhci_disable(cq_host);

	dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
			   cq_host->trans_desc_base,
			   cq_host->trans_desc_dma_base);

	dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
			   cq_host->desc_base,
			   cq_host->desc_dma_base);

	cq_host->trans_desc_base = NULL;
	cq_host->desc_base = NULL;

	cq_host->enabled = false;
}
static void cqhci_prep_task_desc(struct mmc_request *mrq,
				 u64 *data, bool intr)
{
	u32 req_flags = mrq->data->flags;

	*data = CQHCI_VALID(1) |
		CQHCI_END(1) |
		CQHCI_INT(intr) |
		CQHCI_ACT(0x5) |
		CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
		CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
		CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
		CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
		CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
		CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
		CQHCI_BLK_COUNT(mrq->data->blocks) |
		CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

	pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
		 mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
}
static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
	int sg_count;
	struct mmc_data *data = mrq->data;

	if (!data)
		return -EINVAL;

	sg_count = dma_map_sg(mmc_dev(host), data->sg,
			      data->sg_len,
			      (data->flags & MMC_DATA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!sg_count) {
		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
		return -ENOMEM;
	}

	return sg_count;
}
static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
				bool dma64)
{
	__le32 *attr = (__le32 __force *)desc;

	*attr = (CQHCI_VALID(1) |
		 CQHCI_END(end ? 1 : 0) |
		 CQHCI_INT(0) |
		 CQHCI_ACT(0x4) |
		 CQHCI_DAT_LENGTH(len));

	if (dma64) {
		__le64 *dataddr = (__le64 __force *)(desc + 4);

		dataddr[0] = cpu_to_le64(addr);
	} else {
		__le32 *dataddr = (__le32 __force *)(desc + 4);

		dataddr[0] = cpu_to_le32(addr);
	}
}
static int cqhci_prep_tran_desc(struct mmc_request *mrq,
				struct cqhci_host *cq_host, int tag)
{
	struct mmc_data *data = mrq->data;
	int i, sg_count, len;
	bool end = false;
	bool dma64 = cq_host->dma64;
	dma_addr_t addr;
	u8 *desc;
	struct scatterlist *sg;

	sg_count = cqhci_dma_map(mrq->host, mrq);
	if (sg_count < 0) {
		pr_err("%s: %s: unable to map sg lists, %d\n",
		       mmc_hostname(mrq->host), __func__, sg_count);
		return sg_count;
	}

	desc = get_trans_desc(cq_host, tag);

	for_each_sg(data->sg, sg, sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((i+1) == sg_count)
			end = true;
		cqhci_set_tran_desc(desc, addr, len, end, dma64);
		desc += cq_host->trans_desc_len;
	}

	return 0;
}
static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
				 struct mmc_request *mrq)
{
	u64 *task_desc = NULL;
	u64 data = 0;
	u8 resp_type;
	u8 *desc;
	__le64 *dataddr;
	struct cqhci_host *cq_host = mmc->cqe_private;
	u8 timing;

	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
		resp_type = 0x0;
		timing = 0x1;
	} else {
		if (mrq->cmd->flags & MMC_RSP_R1B) {
			resp_type = 0x3;
			timing = 0x0;
		} else {
			resp_type = 0x2;
			timing = 0x1;
		}
	}

	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
	memset(task_desc, 0, cq_host->task_desc_len);
	data |= (CQHCI_VALID(1) |
		 CQHCI_END(1) |
		 CQHCI_INT(1) |
		 CQHCI_QBAR(1) |
		 CQHCI_ACT(0x5) |
		 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
		 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
	*task_desc |= data;
	desc = (u8 *)task_desc;
	pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
		 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
	dataddr = (__le64 __force *)(desc + 4);
	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
}
static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (data) {
		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_READ) ?
			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

/* DCMDs (requests with a command but no data) use the dedicated DCMD slot */
static inline int cqhci_tag(struct mmc_request *mrq)
{
	return mrq->cmd ? DCMD_SLOT : mrq->tag;
}
static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int err = 0;
	u64 data = 0;
	u64 *task_desc = NULL;
	int tag = cqhci_tag(mrq);
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;

	if (!cq_host->enabled) {
		pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
		return -EINVAL;
	}

	/* First request after resume has to re-enable */
	if (!cq_host->activated)
		__cqhci_enable(cq_host);

	if (!mmc->cqe_on) {
		cqhci_writel(cq_host, 0, CQHCI_CTL);
		mmc->cqe_on = true;
		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
			pr_err("%s: cqhci: CQE failed to exit halt state\n",
			       mmc_hostname(mmc));
		}
		if (cq_host->ops->enable)
			cq_host->ops->enable(mmc);
	}

	if (mrq->data) {
		task_desc = (__le64 __force *)get_desc(cq_host, tag);
		cqhci_prep_task_desc(mrq, &data, 1);
		*task_desc = cpu_to_le64(data);
		err = cqhci_prep_tran_desc(mrq, cq_host, tag);
		if (err) {
			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
			       mmc_hostname(mmc), err);
			return err;
		}
	} else {
		cqhci_prep_dcmd_desc(mmc, mrq);
	}

	spin_lock_irqsave(&cq_host->lock, flags);

	if (cq_host->recovery_halt) {
		err = -EBUSY;
		goto out_unlock;
	}

	cq_host->slot[tag].mrq = mrq;
	cq_host->slot[tag].flags = 0;

	cq_host->qcnt += 1;

	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
			 mmc_hostname(mmc), tag);
out_unlock:
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (err)
		cqhci_post_req(mmc, mrq);

	return err;
}
static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
				  bool notify)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->recovery_halt) {
		cq_host->recovery_halt = true;
		pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
		wake_up(&cq_host->wait_queue);
		if (notify && mrq->recovery_notifier)
			mrq->recovery_notifier(mrq);
	}
}

static unsigned int cqhci_error_flags(int error1, int error2)
{
	int error = error1 ? error1 : error2;

	switch (error) {
	case -EILSEQ:
		return CQHCI_HOST_CRC;
	case -ETIMEDOUT:
		return CQHCI_HOST_TIMEOUT;
	default:
		return CQHCI_HOST_OTHER;
	}
}
static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
			    int data_error)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot;
	u32 terri;
	int tag;

	spin_lock(&cq_host->lock);

	terri = cqhci_readl(cq_host, CQHCI_TERRI);

	pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
		 mmc_hostname(mmc), status, cmd_error, data_error, terri);

	/* Forget about errors when recovery has already been triggered */
	if (cq_host->recovery_halt)
		goto out_unlock;

	if (!cq_host->qcnt) {
		WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
			  mmc_hostname(mmc), status, cmd_error, data_error,
			  terri);
		goto out_unlock;
	}

	if (CQHCI_TERRI_C_VALID(terri)) {
		tag = CQHCI_TERRI_C_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(cmd_error, data_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (CQHCI_TERRI_D_VALID(terri)) {
		tag = CQHCI_TERRI_D_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (!cq_host->recovery_halt) {
		/*
		 * The only way to guarantee forward progress is to mark at
		 * least one task in error, so if none is indicated, pick one.
		 */
		for (tag = 0; tag < NUM_SLOTS; tag++) {
			slot = &cq_host->slot[tag];
			if (!slot->mrq)
				continue;
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
			break;
		}
	}

out_unlock:
	spin_unlock(&cq_host->lock);
}
static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq) {
		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
			  mmc_hostname(mmc), tag);
		return;
	}

	/* No completions allowed during recovery */
	if (cq_host->recovery_halt) {
		slot->flags |= CQHCI_COMPLETED;
		return;
	}

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		if (data->error)
			data->bytes_xfered = 0;
		else
			data->bytes_xfered = data->blksz * data->blocks;
	}

	mmc_cqe_request_done(mmc, mrq);
}
irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
		      int data_error)
{
	u32 status;
	unsigned long tag = 0, comp_status;
	struct cqhci_host *cq_host = mmc->cqe_private;

	status = cqhci_readl(cq_host, CQHCI_IS);
	cqhci_writel(cq_host, status, CQHCI_IS);

	pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

	if ((status & CQHCI_IS_RED) || cmd_error || data_error)
		cqhci_error_irq(mmc, status, cmd_error, data_error);

	if (status & CQHCI_IS_TCC) {
		/* read TCN and complete the request */
		comp_status = cqhci_readl(cq_host, CQHCI_TCN);
		cqhci_writel(cq_host, comp_status, CQHCI_TCN);
		pr_debug("%s: cqhci: TCN: 0x%08lx\n",
			 mmc_hostname(mmc), comp_status);

		spin_lock(&cq_host->lock);

		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
			/* complete the corresponding mrq */
			pr_debug("%s: cqhci: completing tag %lu\n",
				 mmc_hostname(mmc), tag);
			cqhci_finish_mrq(mmc, tag);
		}

		if (cq_host->waiting_for_idle && !cq_host->qcnt) {
			cq_host->waiting_for_idle = false;
			wake_up(&cq_host->wait_queue);
		}

		spin_unlock(&cq_host->lock);
	}

	if (status & CQHCI_IS_TCL)
		wake_up(&cq_host->wait_queue);

	if (status & CQHCI_IS_HAC)
		wake_up(&cq_host->wait_queue);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);
static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
	unsigned long flags;
	bool is_idle;

	spin_lock_irqsave(&cq_host->lock, flags);
	is_idle = !cq_host->qcnt || cq_host->recovery_halt;
	*ret = cq_host->recovery_halt ? -EBUSY : 0;
	cq_host->waiting_for_idle = !is_idle;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int ret;

	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

	return ret;
}
static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool *recovery_needed)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int tag = cqhci_tag(mrq);
	struct cqhci_slot *slot = &cq_host->slot[tag];
	unsigned long flags;
	bool timed_out;

	spin_lock_irqsave(&cq_host->lock, flags);
	timed_out = slot->mrq == mrq;
	if (timed_out) {
		slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
		cqhci_recovery_needed(mmc, mrq, false);
		*recovery_needed = cq_host->recovery_halt;
	}
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (timed_out) {
		pr_err("%s: cqhci: timeout for tag %d\n",
		       mmc_hostname(mmc), tag);
		cqhci_dumpregs(cq_host);
	}

	return timed_out;
}
static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}
static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_CLEAR_ALL_TASKS;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_tasks_cleared(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to clear tasks\n",
			 mmc_hostname(mmc));

	return ret;
}
static bool cqhci_halted(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}
static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	if (cqhci_halted(cq_host))
		return true;

	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_HALT;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_halted(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

	return ret;
}
/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), so we set the timeout based on a
 * generous command timeout.
 */
#define CQHCI_START_HALT_TIMEOUT	5
static void cqhci_recovery_start(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, true);

	mmc->cqe_on = false;
}
static int cqhci_error_from_flags(unsigned int flags)
{
	if (!flags)
		return 0;

	/* CRC errors might indicate re-tuning so prefer to report that */
	if (flags & CQHCI_HOST_CRC)
		return -EILSEQ;

	if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
		return -ETIMEDOUT;

	return -EIO;
}
static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq)
		return;

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		data->bytes_xfered = 0;
		data->error = cqhci_error_from_flags(slot->flags);
	} else {
		mrq->cmd->error = cqhci_error_from_flags(slot->flags);
	}

	mmc_cqe_request_done(cq_host->mmc, mrq);
}
static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
	int i;

	for (i = 0; i < cq_host->num_slots; i++)
		cqhci_recover_mrq(cq_host, i);
}
/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT	20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT		20
static void cqhci_recovery_finish(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;
	u32 cqcfg;
	bool ok;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
		ok = false;

	/*
	 * The specification contradicts itself, by saying that tasks cannot be
	 * cleared if CQHCI does not halt, but also that if CQHCI does not halt
	 * it should be disabled/re-enabled, yet not disabled before clearing
	 * tasks. Have a go anyway.
	 */
	if (!ok) {
		pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
		cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		cqcfg |= CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		/* Be sure that there are no tasks */
		ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
		if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
			ok = false;
		WARN_ON(!ok);
	}

	cqhci_recover_mrqs(cq_host);

	WARN_ON(cq_host->qcnt);

	spin_lock_irqsave(&cq_host->lock, flags);
	cq_host->qcnt = 0;
	cq_host->recovery_halt = false;
	mmc->cqe_on = false;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	/* Ensure all writes are done before interrupts are re-enabled */
	wmb();

	cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}
static const struct mmc_cqe_ops cqhci_cqe_ops = {
	.cqe_enable = cqhci_enable,
	.cqe_disable = cqhci_disable,
	.cqe_request = cqhci_request,
	.cqe_post_req = cqhci_post_req,
	.cqe_off = cqhci_off,
	.cqe_wait_for_idle = cqhci_wait_for_idle,
	.cqe_timeout = cqhci_timeout,
	.cqe_recovery_start = cqhci_recovery_start,
	.cqe_recovery_finish = cqhci_recovery_finish,
};
struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
	struct cqhci_host *cq_host;
	struct resource *cqhci_memres = NULL;

	/* check and setup CMDQ interface */
	cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						    "cqhci_mem");
	if (!cqhci_memres) {
		dev_dbg(&pdev->dev, "CMDQ not supported\n");
		return ERR_PTR(-EINVAL);
	}

	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host)
		return ERR_PTR(-ENOMEM);
	cq_host->mmio = devm_ioremap(&pdev->dev,
				     cqhci_memres->start,
				     resource_size(cqhci_memres));
	if (!cq_host->mmio) {
		dev_err(&pdev->dev, "failed to remap cqhci regs\n");
		return ERR_PTR(-EBUSY);
	}
	dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

	return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);
static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
	u32 ver = cqhci_readl(cq_host, CQHCI_VER);

	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}
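/*
 * Example: assuming the usual CQHCI_VER field layout from cqhci.h (major
 * version in bits [11:8], the two minor digits in bits [7:4] and [3:0]),
 * a CQHCI_VER value of 0x0510 is reported below as "CQHCI version 5.10".
 */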
int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
	       bool dma64)
{
	int err;

	cq_host->dma64 = dma64;
	cq_host->mmc = mmc;
	cq_host->mmc->cqe_private = cq_host;

	cq_host->num_slots = NUM_SLOTS;
	cq_host->dcmd_slot = DCMD_SLOT;

	mmc->cqe_ops = &cqhci_cqe_ops;

	mmc->cqe_qdepth = NUM_SLOTS;
	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		mmc->cqe_qdepth -= 1;

	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
				     sizeof(*cq_host->slot), GFP_KERNEL);
	if (!cq_host->slot) {
		err = -ENOMEM;
		goto out_err;
	}

	spin_lock_init(&cq_host->lock);

	init_completion(&cq_host->halt_comp);
	init_waitqueue_head(&cq_host->wait_queue);

	pr_info("%s: CQHCI version %u.%02u\n",
		mmc_hostname(mmc), cqhci_ver_major(cq_host),
		cqhci_ver_minor(cq_host));

	return 0;

out_err:
	pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
	       mmc_hostname(mmc), cqhci_ver_major(cq_host),
	       cqhci_ver_minor(cq_host), err);
	return err;
}
EXPORT_SYMBOL(cqhci_init);
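/*
 * Usage sketch for a platform glue driver (illustrative only; the function
 * and variable names below are hypothetical and not defined in this file):
 *
 *	static int example_cqe_attach(struct platform_device *pdev,
 *				      struct mmc_host *mmc, bool dma64)
 *	{
 *		struct cqhci_host *cq_host = cqhci_pltfm_init(pdev);
 *
 *		if (IS_ERR(cq_host))
 *			return PTR_ERR(cq_host);
 *
 *		cq_host->ops = &example_cqhci_host_ops;
 *		return cqhci_init(cq_host, mmc, dma64);
 *	}
 *
 * The glue driver forwards CQE interrupts to cqhci_irq(mmc, intmask,
 * cmd_error, data_error) from its own interrupt handler, and calls
 * cqhci_suspend()/cqhci_resume() from its PM callbacks.
 */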
MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");