/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
	struct mmc_request *mrq;
	unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
#define CQHCI_COMPLETED		BIT(1)
#define CQHCI_HOST_CRC		BIT(2)
#define CQHCI_HOST_TIMEOUT	BIT(3)
#define CQHCI_HOST_OTHER	BIT(4)
};

static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *desc = get_desc(cq_host, tag);

	return desc + cq_host->task_desc_len;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_dma_base +
		(cq_host->mmc->max_segs * tag *
		 cq_host->trans_desc_len);
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_base +
		(cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
}

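/*
 * Each task slot owns one link descriptor that points at that slot's region
 * of the transfer descriptor table. setup_trans_desc() below writes that link
 * descriptor; the DCMD slot gets an end-of-chain entry instead, because a
 * direct command carries no data transfer.
 */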
static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *link_temp;
	dma_addr_t trans_temp;

	link_temp = get_link_desc(cq_host, tag);
	trans_temp = get_trans_desc_dma(cq_host, tag);

	memset(link_temp, 0, cq_host->link_desc_len);
	if (cq_host->link_desc_len > 8)
		*(link_temp + 8) = 0;

	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
		return;
	}

	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

	if (cq_host->dma64) {
		__le64 *data_addr = (__le64 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le64(trans_temp);
	} else {
		__le32 *data_addr = (__le32 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le32(trans_temp);
	}
}

static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
	cqhci_writel(cq_host, set, CQHCI_ISTE);
	cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;

	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

	CQHCI_DUMP("Caps: 0x%08x | Version: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CAP),
		   cqhci_readl(cq_host, CQHCI_VER));
	CQHCI_DUMP("Config: 0x%08x | Control: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CFG),
		   cqhci_readl(cq_host, CQHCI_CTL));
	CQHCI_DUMP("Int stat: 0x%08x | Int enab: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_IS),
		   cqhci_readl(cq_host, CQHCI_ISTE));
	CQHCI_DUMP("Int sig: 0x%08x | Int Coal: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_ISGE),
		   cqhci_readl(cq_host, CQHCI_IC));
	CQHCI_DUMP("TDL base: 0x%08x | TDL up32: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDLBA),
		   cqhci_readl(cq_host, CQHCI_TDLBAU));
	CQHCI_DUMP("Doorbell: 0x%08x | TCN: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDBR),
		   cqhci_readl(cq_host, CQHCI_TCN));
	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_DQS),
		   cqhci_readl(cq_host, CQHCI_DPT));
	CQHCI_DUMP("Task clr: 0x%08x | SSC1: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TCLR),
		   cqhci_readl(cq_host, CQHCI_SSC1));
	CQHCI_DUMP("SSC2: 0x%08x | DCMD rsp: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_SSC2),
		   cqhci_readl(cq_host, CQHCI_CRDCT));
	CQHCI_DUMP("RED mask: 0x%08x | TERRI: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_RMEM),
		   cqhci_readl(cq_host, CQHCI_TERRI));
	CQHCI_DUMP("Resp idx: 0x%08x | Resp arg: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CRI),
		   cqhci_readl(cq_host, CQHCI_CRA));

	if (cq_host->ops->dumpregs)
		cq_host->ops->dumpregs(mmc);
	else
		CQHCI_DUMP(": ===========================================\n");
}

/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 *
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|
 *      .              .
 *  no. of slots     max-segs
 *      .              .
 * |----------|
 *
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */

static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
	int i = 0;

	/* task descriptor can be 64/128 bit irrespective of arch */
	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
			     CQHCI_TASK_DESC_SZ, CQHCI_CFG);
		cq_host->task_desc_len = 16;
	} else {
		cq_host->task_desc_len = 8;
	}

	/*
	 * 96 bits length of transfer desc instead of 128 bits which means
	 * ADMA would expect next valid descriptor at the 96th bit
	 * or 128th bit
	 */
	if (cq_host->dma64) {
		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
			cq_host->trans_desc_len = 12;
		else
			cq_host->trans_desc_len = 16;
		cq_host->link_desc_len = 16;
	} else {
		cq_host->trans_desc_len = 8;
		cq_host->link_desc_len = 8;
	}

	/* total size of a slot: 1 task & 1 transfer (link) */
	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

	cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
		(cq_host->num_slots - 1);

	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_size,
		 cq_host->data_size, cq_host->slot_sz);

	/*
	 * allocate a dma-mapped chunk of memory for the descriptors
	 * allocate a dma-mapped chunk of memory for link descriptors
	 * setup each link-desc memory offset per slot-number to
	 * the descriptor table.
	 */
	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						 cq_host->desc_size,
						 &cq_host->desc_dma_base,
						 GFP_KERNEL);
	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						       cq_host->data_size,
						       &cq_host->trans_desc_dma_base,
						       GFP_KERNEL);
	if (!cq_host->desc_base || !cq_host->trans_desc_base)
		return -ENOMEM;

	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_base,
		 cq_host->trans_desc_base,
		 (unsigned long long)cq_host->desc_dma_base,
		 (unsigned long long)cq_host->trans_desc_dma_base);

	for (; i < (cq_host->num_slots); i++)
		setup_trans_desc(cq_host, i);

	return 0;
}

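/*
 * Program the controller from scratch: select task descriptor size and DCMD
 * support, write the task descriptor list base address (TDLBA/TDLBAU) and the
 * card RCA, then set CQHCI_ENABLE. Interrupts stay masked until all register
 * writes are done.
 */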
static void __cqhci_enable(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

	/* Configuration must not be changed while enabled */
	if (cqcfg & CQHCI_ENABLE) {
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
	}

	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		cqcfg |= CQHCI_DCMD;

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
		cqcfg |= CQHCI_TASK_DESC_SZ;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBA);
	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBAU);

	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

	cqhci_set_irqs(cq_host, 0);

	cqcfg |= CQHCI_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	mmc->cqe_on = true;

	if (cq_host->ops->enable)
		cq_host->ops->enable(mmc);

	/* Ensure all writes are done before interrupts are enabled */
	wmb();

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	cq_host->activated = true;
}

static void __cqhci_disable(struct cqhci_host *cq_host)
{
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cq_host->mmc->cqe_on = false;

	cq_host->activated = false;
}

int cqhci_suspend(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (cq_host->enabled)
		__cqhci_disable(cq_host);

	return 0;
}
EXPORT_SYMBOL(cqhci_suspend);

int cqhci_resume(struct mmc_host *mmc)
{
	/* Re-enable is done upon first request */
	return 0;
}
EXPORT_SYMBOL(cqhci_resume);

static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int err;

	if (cq_host->enabled)
		return 0;

	cq_host->rca = card->rca;

	err = cqhci_host_alloc_tdl(cq_host);
	if (err)
		return err;

	__cqhci_enable(cq_host);

	cq_host->enabled = true;

#ifdef DEBUG
	cqhci_dumpregs(cq_host);
#endif
	return 0;
}

/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static void cqhci_off(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	ktime_t timeout;
	bool timed_out;
	u32 reg;

	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
		return;

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, false);

	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

	timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
	while (1) {
		timed_out = ktime_compare(ktime_get(), timeout) > 0;
		reg = cqhci_readl(cq_host, CQHCI_CTL);
		if ((reg & CQHCI_HALT) || timed_out)
			break;
	}

	if (timed_out)
		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
	else
		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

	mmc->cqe_on = false;
}

static void cqhci_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->enabled)
		return;

	cqhci_off(mmc);

	__cqhci_disable(cq_host);

	dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
			   cq_host->trans_desc_base,
			   cq_host->trans_desc_dma_base);

	dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
			   cq_host->desc_base,
			   cq_host->desc_dma_base);

	cq_host->trans_desc_base = NULL;
	cq_host->desc_base = NULL;

	cq_host->enabled = false;
}

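/*
 * Pack a data request into the 64-bit CQHCI task descriptor: the valid/end/
 * interrupt/activity attribute bits, the request flags (direction, priority,
 * reliable write, QBR, tag), and the block count and block address taken from
 * mrq->data. The caller writes the result into the slot's task descriptor.
 */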
static void cqhci_prep_task_desc(struct mmc_request *mrq,
				 u64 *data, bool intr)
{
	u32 req_flags = mrq->data->flags;

	*data = CQHCI_VALID(1) |
		CQHCI_END(1) |
		CQHCI_INT(intr) |
		CQHCI_ACT(0x5) |
		CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
		CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
		CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
		CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
		CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
		CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
		CQHCI_BLK_COUNT(mrq->data->blocks) |
		CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

	pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
		 mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
}

static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
	int sg_count;
	struct mmc_data *data = mrq->data;

	if (!data)
		return -EINVAL;

	sg_count = dma_map_sg(mmc_dev(host), data->sg,
			      data->sg_len,
			      (data->flags & MMC_DATA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!sg_count) {
		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
		return -ENOMEM;
	}

	return sg_count;
}

static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
				bool dma64)
{
	__le32 *attr = (__le32 __force *)desc;

	*attr = (CQHCI_VALID(1) |
		 CQHCI_END(end ? 1 : 0) |
		 CQHCI_INT(0) |
		 CQHCI_ACT(0x4) |
		 CQHCI_DAT_LENGTH(len));

	if (dma64) {
		__le64 *dataddr = (__le64 __force *)(desc + 4);

		dataddr[0] = cpu_to_le64(addr);
	} else {
		__le32 *dataddr = (__le32 __force *)(desc + 4);

		dataddr[0] = cpu_to_le32(addr);
	}
}

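/*
 * DMA-map the request's scatterlist and emit one transfer descriptor per
 * mapped segment into this tag's region of the transfer descriptor table.
 * Only the final segment is marked with the END attribute, so the controller
 * knows where the data chain stops.
 */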
static int cqhci_prep_tran_desc(struct mmc_request *mrq,
				struct cqhci_host *cq_host, int tag)
{
	struct mmc_data *data = mrq->data;
	int i, sg_count, len;
	bool end = false;
	bool dma64 = cq_host->dma64;
	dma_addr_t addr;
	u8 *desc;
	struct scatterlist *sg;

	sg_count = cqhci_dma_map(mrq->host, mrq);
	if (sg_count < 0) {
		pr_err("%s: %s: unable to map sg lists, %d\n",
		       mmc_hostname(mrq->host), __func__, sg_count);
		return sg_count;
	}

	desc = get_trans_desc(cq_host, tag);

	for_each_sg(data->sg, sg, sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((i+1) == sg_count)
			end = true;
		cqhci_set_tran_desc(desc, addr, len, end, dma64);
		desc += cq_host->trans_desc_len;
	}

	return 0;
}

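/*
 * Direct commands (DCMD) use the dedicated slot and encode the command index,
 * response type and timing directly in the task descriptor; the command
 * argument goes into the following 64-bit word instead of a data address.
 */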
static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
				 struct mmc_request *mrq)
{
	u64 *task_desc = NULL;
	u64 data = 0;
	u8 resp_type;
	u8 *desc;
	__le64 *dataddr;
	struct cqhci_host *cq_host = mmc->cqe_private;
	u8 timing;

	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
		resp_type = 0x0;
		timing = 0x1;
	} else {
		if (mrq->cmd->flags & MMC_RSP_R1B) {
			resp_type = 0x3;
			timing = 0x0;
		} else {
			resp_type = 0x2;
			timing = 0x1;
		}
	}

	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
	memset(task_desc, 0, cq_host->task_desc_len);
	data |= (CQHCI_VALID(1) |
		 CQHCI_END(1) |
		 CQHCI_INT(1) |
		 CQHCI_QBAR(1) |
		 CQHCI_ACT(0x5) |
		 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
		 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
	*task_desc |= cpu_to_le64(data);
	desc = (u8 *)task_desc;
	pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
		 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
	dataddr = (__le64 __force *)(desc + 4);
	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
}

static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (data) {
		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_READ) ?
			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static inline int cqhci_tag(struct mmc_request *mrq)
{
	return mrq->cmd ? DCMD_SLOT : mrq->tag;
}

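/*
 * Issue a request: make sure CQE is enabled and out of halt, build the task
 * and transfer descriptors (or the DCMD descriptor for mrq->cmd requests),
 * record the mrq in its slot and finally ring the doorbell (CQHCI_TDBR) with
 * the queue lock held. Requests are refused while error recovery has halted
 * the queue.
 */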
static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int err = 0;
	u64 data = 0;
	u64 *task_desc = NULL;
	int tag = cqhci_tag(mrq);
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;

	if (!cq_host->enabled) {
		pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
		return -EINVAL;
	}

	/* First request after resume has to re-enable */
	if (!cq_host->activated)
		__cqhci_enable(cq_host);

	if (!mmc->cqe_on) {
		cqhci_writel(cq_host, 0, CQHCI_CTL);
		mmc->cqe_on = true;
		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
			pr_err("%s: cqhci: CQE failed to exit halt state\n",
			       mmc_hostname(mmc));
		}
		if (cq_host->ops->enable)
			cq_host->ops->enable(mmc);
	}

	if (mrq->data) {
		task_desc = (__le64 __force *)get_desc(cq_host, tag);
		cqhci_prep_task_desc(mrq, &data, 1);
		*task_desc = cpu_to_le64(data);
		err = cqhci_prep_tran_desc(mrq, cq_host, tag);
		if (err) {
			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
			       mmc_hostname(mmc), err);
			return err;
		}
	} else {
		cqhci_prep_dcmd_desc(mmc, mrq);
	}

	spin_lock_irqsave(&cq_host->lock, flags);

	if (cq_host->recovery_halt) {
		err = -EBUSY;
		goto out_unlock;
	}

	cq_host->slot[tag].mrq = mrq;
	cq_host->slot[tag].flags = 0;

	cq_host->qcnt += 1;

	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
			 mmc_hostname(mmc), tag);
out_unlock:
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (err)
		cqhci_post_req(mmc, mrq);

	return err;
}

static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
				  bool notify)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->recovery_halt) {
		cq_host->recovery_halt = true;
		pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
		wake_up(&cq_host->wait_queue);
		if (notify && mrq->recovery_notifier)
			mrq->recovery_notifier(mrq);
	}
}

static unsigned int cqhci_error_flags(int error1, int error2)
{
	int error = error1 ? error1 : error2;

	switch (error) {
	case -EILSEQ:
		return CQHCI_HOST_CRC;
	case -ETIMEDOUT:
		return CQHCI_HOST_TIMEOUT;
	default:
		return CQHCI_HOST_OTHER;
	}
}

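/*
 * Error interrupt handling: read CQHCI_TERRI to find the task that failed on
 * the command or data line, record the error type in that slot's flags and
 * kick off recovery. If TERRI does not identify a task, at least one queued
 * task is marked in error anyway so that recovery can make forward progress.
 */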
static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
			    int data_error)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot;
	u32 terri;
	int tag;

	spin_lock(&cq_host->lock);

	terri = cqhci_readl(cq_host, CQHCI_TERRI);

	pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
		 mmc_hostname(mmc), status, cmd_error, data_error, terri);

	/* Forget about errors when recovery has already been triggered */
	if (cq_host->recovery_halt)
		goto out_unlock;

	if (!cq_host->qcnt) {
		WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
			  mmc_hostname(mmc), status, cmd_error, data_error,
			  terri);
		goto out_unlock;
	}

	if (CQHCI_TERRI_C_VALID(terri)) {
		tag = CQHCI_TERRI_C_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(cmd_error, data_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (CQHCI_TERRI_D_VALID(terri)) {
		tag = CQHCI_TERRI_D_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (!cq_host->recovery_halt) {
		/*
		 * The only way to guarantee forward progress is to mark at
		 * least one task in error, so if none is indicated, pick one.
		 */
		for (tag = 0; tag < NUM_SLOTS; tag++) {
			slot = &cq_host->slot[tag];
			if (!slot->mrq)
				continue;
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
			break;
		}
	}

out_unlock:
	spin_unlock(&cq_host->lock);
}

static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq) {
		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
			  mmc_hostname(mmc), tag);
		return;
	}

	/* No completions allowed during recovery */
	if (cq_host->recovery_halt) {
		slot->flags |= CQHCI_COMPLETED;
		return;
	}

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		if (data->error)
			data->bytes_xfered = 0;
		else
			data->bytes_xfered = data->blksz * data->blocks;
	}

	mmc_cqe_request_done(mmc, mrq);
}

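/*
 * Main interrupt service routine, called by the glue driver's interrupt
 * handler together with any host-level cmd/data errors. Task completion
 * notifications (TCN bits) are acknowledged and the corresponding requests
 * are completed; halt and clear-tasks events wake up their waiters.
 */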
irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
		      int data_error)
{
	u32 status;
	unsigned long tag = 0, comp_status;
	struct cqhci_host *cq_host = mmc->cqe_private;

	status = cqhci_readl(cq_host, CQHCI_IS);
	cqhci_writel(cq_host, status, CQHCI_IS);

	pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

	if ((status & CQHCI_IS_RED) || cmd_error || data_error)
		cqhci_error_irq(mmc, status, cmd_error, data_error);

	if (status & CQHCI_IS_TCC) {
		/* read TCN and complete the request */
		comp_status = cqhci_readl(cq_host, CQHCI_TCN);
		cqhci_writel(cq_host, comp_status, CQHCI_TCN);
		pr_debug("%s: cqhci: TCN: 0x%08lx\n",
			 mmc_hostname(mmc), comp_status);

		spin_lock(&cq_host->lock);

		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
			/* complete the corresponding mrq */
			pr_debug("%s: cqhci: completing tag %lu\n",
				 mmc_hostname(mmc), tag);
			cqhci_finish_mrq(mmc, tag);
		}

		if (cq_host->waiting_for_idle && !cq_host->qcnt) {
			cq_host->waiting_for_idle = false;
			wake_up(&cq_host->wait_queue);
		}

		spin_unlock(&cq_host->lock);
	}

	if (status & CQHCI_IS_TCL)
		wake_up(&cq_host->wait_queue);

	if (status & CQHCI_IS_HAC)
		wake_up(&cq_host->wait_queue);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);

static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
	unsigned long flags;
	bool is_idle;

	spin_lock_irqsave(&cq_host->lock, flags);
	is_idle = !cq_host->qcnt || cq_host->recovery_halt;
	*ret = cq_host->recovery_halt ? -EBUSY : 0;
	cq_host->waiting_for_idle = !is_idle;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int ret;

	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

	return ret;
}

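/*
 * Called when the block layer times out a request. If the request is still
 * owned by its slot, it is marked with CQHCI_EXTERNAL_TIMEOUT, recovery is
 * requested and the register state is dumped for debugging. Returns true if
 * the request had indeed not completed yet.
 */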
static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool *recovery_needed)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int tag = cqhci_tag(mrq);
	struct cqhci_slot *slot = &cq_host->slot[tag];
	unsigned long flags;
	bool timed_out;

	spin_lock_irqsave(&cq_host->lock, flags);
	timed_out = slot->mrq == mrq;
	if (timed_out) {
		slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
		cqhci_recovery_needed(mmc, mrq, false);
		*recovery_needed = cq_host->recovery_halt;
	}
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (timed_out) {
		pr_err("%s: cqhci: timeout for tag %d\n",
		       mmc_hostname(mmc), tag);
		cqhci_dumpregs(cq_host);
	}

	return timed_out;
}

static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_CLEAR_ALL_TASKS;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_tasks_cleared(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to clear tasks\n",
			 mmc_hostname(mmc));

	return ret;
}

static bool cqhci_halted(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	if (cqhci_halted(cq_host))
		return true;

	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_HALT;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_halted(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

	return ret;
}

/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), so we set the timeout based on a
 * generous command timeout.
 */
#define CQHCI_START_HALT_TIMEOUT	5

static void cqhci_recovery_start(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, true);

	mmc->cqe_on = false;
}

static int cqhci_error_from_flags(unsigned int flags)
{
	if (!flags)
		return 0;

	/* CRC errors might indicate re-tuning so prefer to report that */
	if (flags & CQHCI_HOST_CRC)
		return -EILSEQ;

	if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
		return -ETIMEDOUT;

	return -EIO;
}

static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq)
		return;

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		data->bytes_xfered = 0;
		data->error = cqhci_error_from_flags(slot->flags);
	} else {
		mrq->cmd->error = cqhci_error_from_flags(slot->flags);
	}

	mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
	int i;

	for (i = 0; i < cq_host->num_slots; i++)
		cqhci_recover_mrq(cq_host, i);
}

/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT	20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT		20

static void cqhci_recovery_finish(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;
	u32 cqcfg;
	bool ok;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
		ok = false;

	/*
	 * The specification contradicts itself, by saying that tasks cannot be
	 * cleared if CQHCI does not halt, but if CQHCI does not halt, it should
	 * be disabled/re-enabled, but not to disable before clearing tasks.
	 * Have a go anyway.
	 */
	if (!ok) {
		pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
		cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		cqcfg |= CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		/* Be sure that there are no tasks */
		ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
		if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
			ok = false;
		WARN_ON(!ok);
	}

	cqhci_recover_mrqs(cq_host);

	WARN_ON(cq_host->qcnt);

	spin_lock_irqsave(&cq_host->lock, flags);

	cq_host->recovery_halt = false;
	mmc->cqe_on = false;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	/* Ensure all writes are done before interrupts are re-enabled */
	wmb();

	cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}

static const struct mmc_cqe_ops cqhci_cqe_ops = {
	.cqe_enable = cqhci_enable,
	.cqe_disable = cqhci_disable,
	.cqe_request = cqhci_request,
	.cqe_post_req = cqhci_post_req,
	.cqe_off = cqhci_off,
	.cqe_wait_for_idle = cqhci_wait_for_idle,
	.cqe_timeout = cqhci_timeout,
	.cqe_recovery_start = cqhci_recovery_start,
	.cqe_recovery_finish = cqhci_recovery_finish,
};

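/*
 * cqhci_pltfm_init() maps the CQHCI register region of a platform device so
 * that the returned cqhci_host can be handed to cqhci_init(). A glue driver
 * probe path might look roughly like the sketch below (my_cqhci_ops and dma64
 * are illustrative placeholders, not part of this file):
 *
 *	cq_host = cqhci_pltfm_init(pdev);
 *	if (IS_ERR(cq_host))
 *		return PTR_ERR(cq_host);
 *	cq_host->ops = &my_cqhci_ops;
 *	err = cqhci_init(cq_host, mmc, dma64);
 */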
struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
	struct cqhci_host *cq_host;
	struct resource *cqhci_memres = NULL;

	/* check and setup CMDQ interface */
	cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						    "cqhci_mem");
	if (!cqhci_memres) {
		dev_dbg(&pdev->dev, "CMDQ not supported\n");
		return ERR_PTR(-EINVAL);
	}

	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host)
		return ERR_PTR(-ENOMEM);
	cq_host->mmio = devm_ioremap(&pdev->dev,
				     cqhci_memres->start,
				     resource_size(cqhci_memres));
	if (!cq_host->mmio) {
		dev_err(&pdev->dev, "failed to remap cqhci regs\n");
		return ERR_PTR(-EBUSY);
	}
	dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

	return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);

static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
	u32 ver = cqhci_readl(cq_host, CQHCI_VER);

	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}

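/*
 * Final initialization step for a glue driver: hook the CQE ops into the mmc
 * host, size the queue (one slot is reserved for DCMD when supported),
 * allocate the per-slot bookkeeping and initialize the lock and wait queue.
 */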
int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
	       bool dma64)
{
	int err;

	cq_host->dma64 = dma64;
	cq_host->mmc = mmc;
	cq_host->mmc->cqe_private = cq_host;

	cq_host->num_slots = NUM_SLOTS;
	cq_host->dcmd_slot = DCMD_SLOT;

	mmc->cqe_ops = &cqhci_cqe_ops;

	mmc->cqe_qdepth = NUM_SLOTS;
	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		mmc->cqe_qdepth -= 1;

	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
				     sizeof(*cq_host->slot), GFP_KERNEL);
	if (!cq_host->slot) {
		err = -ENOMEM;
		goto out_err;
	}

	spin_lock_init(&cq_host->lock);

	init_completion(&cq_host->halt_comp);
	init_waitqueue_head(&cq_host->wait_queue);

	pr_info("%s: CQHCI version %u.%02u\n",
		mmc_hostname(mmc), cqhci_ver_major(cq_host),
		cqhci_ver_minor(cq_host));

	return 0;

out_err:
	pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
	       mmc_hostname(mmc), cqhci_ver_major(cq_host),
	       cqhci_ver_minor(cq_host), err);
	return err;
}
EXPORT_SYMBOL(cqhci_init);

MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");