// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "internal.h"

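/*
 * Register access helpers: all MMIO reads and writes are routed through the
 * read_reg()/write_reg() callbacks supplied by the controller driver, so the
 * bus glue (e.g. PCI) decides how the access is actually performed.
 */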
int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *base, u32 offset, u32 *out)
{
	return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
}

int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset,
				    u32 mask, u32 shift, u32 *out)
{
	u32 tmp;
	int ret;

	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
	if (ret)
		return ret;

	*out = (tmp & mask) >> shift;

	return 0;
}

void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
		   u32 offset, u32 val)
{
	mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
}

void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
			 u32 offset, u32 mask, u32 shift, u32 val)
{
	int ret;
	u32 tmp;

	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
	if (ret)
		return;

	tmp &= ~mask;
	tmp |= (val << shift);
	mhi_write_reg(mhi_cntrl, base, offset, tmp);
}

void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
		  dma_addr_t db_val)
{
	mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
	mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
}

void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
		     struct db_cfg *db_cfg,
		     void __iomem *db_addr,
		     dma_addr_t db_val)
{
	if (db_cfg->db_mode) {
		db_cfg->db_val = db_val;
		mhi_write_db(mhi_cntrl, db_addr, db_val);
		db_cfg->db_mode = 0;
	}
}

void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
			     struct db_cfg *db_cfg,
			     void __iomem *db_addr,
			     dma_addr_t db_val)
{
	db_cfg->db_val = db_val;
	mhi_write_db(mhi_cntrl, db_addr, db_val);
}

void mhi_ring_er_db(struct mhi_event *mhi_event)
{
	struct mhi_ring *ring = &mhi_event->ring;

	mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
				     ring->db_addr, *ring->ctxt_wp);
}

void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
{
	dma_addr_t db;
	struct mhi_ring *ring = &mhi_cmd->ring;

	db = ring->iommu_base + (ring->wp - ring->base);
	*ring->ctxt_wp = db;
	mhi_write_db(mhi_cntrl, ring->db_addr, db);
}

void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
		      struct mhi_chan *mhi_chan)
{
	struct mhi_ring *ring = &mhi_chan->tre_ring;
	dma_addr_t db;

	db = ring->iommu_base + (ring->wp - ring->base);
	*ring->ctxt_wp = db;
	mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
				    ring->db_addr, db);
}

enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
{
	u32 exec;
	int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);

	return (ret) ? MHI_EE_MAX : exec;
}
EXPORT_SYMBOL_GPL(mhi_get_exec_env);

enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
{
	u32 state;
	int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				     MHISTATUS_MHISTATE_MASK,
				     MHISTATUS_MHISTATE_SHIFT, &state);

	return ret ? MHI_STATE_MAX : state;
}
EXPORT_SYMBOL_GPL(mhi_get_mhi_state);

int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
			 struct mhi_buf_info *buf_info)
{
	buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
					  buf_info->v_addr, buf_info->len,
					  buf_info->dir);
	if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
		return -ENOMEM;

	return 0;
}

int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf_info)
{
	void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
				       &buf_info->p_addr, GFP_ATOMIC);

	if (!buf)
		return -ENOMEM;

	if (buf_info->dir == DMA_TO_DEVICE)
		memcpy(buf, buf_info->v_addr, buf_info->len);

	buf_info->bb_addr = buf;

	return 0;
}

void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
			    struct mhi_buf_info *buf_info)
{
	dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
			 buf_info->dir);
}

void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf_info)
{
	if (buf_info->dir == DMA_FROM_DEVICE)
		memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);

	mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
			  buf_info->p_addr);
}

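/* Count the number of element slots still free in a transfer or event ring */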
static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
				      struct mhi_ring *ring)
{
	int nr_el;

	if (ring->wp < ring->rp) {
		nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
	} else {
		nr_el = (ring->rp - ring->base) / ring->el_size;
		nr_el += ((ring->base + ring->len - ring->wp) /
			  ring->el_size) - 1;
	}

	return nr_el;
}

static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
{
	return (addr - ring->iommu_base) + ring->base;
}

static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
				 struct mhi_ring *ring)
{
	ring->wp += ring->el_size;
	if (ring->wp >= (ring->base + ring->len))
		ring->wp = ring->base;

	/* smp update */
	smp_wmb();
}

static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
				 struct mhi_ring *ring)
{
	ring->rp += ring->el_size;
	if (ring->rp >= (ring->base + ring->len))
		ring->rp = ring->base;

	/* smp update */
	smp_wmb();
}

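/*
 * Callback used with device_for_each_child() to unregister the client
 * (channel) devices created on the MHI bus, dropping the channel device
 * references taken when they were created.
 */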
int mhi_destroy_device(struct device *dev, void *data)
{
	struct mhi_device *mhi_dev;
	struct mhi_controller *mhi_cntrl;

	if (dev->bus != &mhi_bus_type)
		return 0;

	mhi_dev = to_mhi_device(dev);
	mhi_cntrl = mhi_dev->mhi_cntrl;

	/* Only destroy virtual devices that are attached to the bus */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/*
	 * For the suspend and resume case, this function will get called
	 * without mhi_unregister_controller(). Hence, we need to drop the
	 * references to mhi_dev created for ul and dl channels. We can
	 * be sure that there will be no instances of mhi_dev left after
	 * this.
	 */
	if (mhi_dev->ul_chan)
		put_device(&mhi_dev->ul_chan->mhi_dev->dev);

	if (mhi_dev->dl_chan)
		put_device(&mhi_dev->dl_chan->mhi_dev->dev);

	dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
		mhi_dev->name);

	/* Notify the client and remove the device from MHI bus */
	device_del(dev);
	put_device(dev);

	return 0;
}

void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
	struct mhi_driver *mhi_drv;

	if (!mhi_dev->dev.driver)
		return;

	mhi_drv = to_mhi_driver(mhi_dev->dev.driver);

	if (mhi_drv->status_cb)
		mhi_drv->status_cb(mhi_dev, cb_reason);
}
EXPORT_SYMBOL_GPL(mhi_notify);

/* Bind MHI channels to MHI devices */
void mhi_create_devices(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *mhi_chan;
	struct mhi_device *mhi_dev;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->configured || mhi_chan->mhi_dev ||
		    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
			continue;
		mhi_dev = mhi_alloc_device(mhi_cntrl);
		if (IS_ERR(mhi_dev))
			return;

		mhi_dev->dev_type = MHI_DEVICE_XFER;
		switch (mhi_chan->dir) {
		case DMA_TO_DEVICE:
			mhi_dev->ul_chan = mhi_chan;
			mhi_dev->ul_chan_id = mhi_chan->chan;
			break;
		case DMA_FROM_DEVICE:
			/* We use dl_chan as offload channels */
			mhi_dev->dl_chan = mhi_chan;
			mhi_dev->dl_chan_id = mhi_chan->chan;
			break;
		default:
			dev_err(dev, "Direction not supported\n");
			put_device(&mhi_dev->dev);
			return;
		}

		get_device(&mhi_dev->dev);
		mhi_chan->mhi_dev = mhi_dev;

		/* Check next channel if it matches */
		if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
			if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
				i++;
				mhi_chan++;
				if (mhi_chan->dir == DMA_TO_DEVICE) {
					mhi_dev->ul_chan = mhi_chan;
					mhi_dev->ul_chan_id = mhi_chan->chan;
				} else {
					mhi_dev->dl_chan = mhi_chan;
					mhi_dev->dl_chan_id = mhi_chan->chan;
				}
				get_device(&mhi_dev->dev);
				mhi_chan->mhi_dev = mhi_dev;
			}
		}

		/* Channel name is same for both UL and DL */
		mhi_dev->name = mhi_chan->name;
		dev_set_name(&mhi_dev->dev, "%s_%s",
			     dev_name(&mhi_cntrl->mhi_dev->dev),
			     mhi_dev->name);

		/* Init wakeup source if available */
		if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
			device_init_wakeup(&mhi_dev->dev, true);

		ret = device_add(&mhi_dev->dev);
		if (ret)
			put_device(&mhi_dev->dev);
	}
}

irqreturn_t mhi_irq_handler(int irq_number, void *dev)
{
	struct mhi_event *mhi_event = dev;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	struct mhi_ring *ev_ring = &mhi_event->ring;
	void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);

	/* Only proceed if event ring has pending events */
	if (ev_ring->rp == dev_rp)
		return IRQ_HANDLED;

	/* For client managed event ring, notify pending data */
	if (mhi_event->cl_manage) {
		struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
		struct mhi_device *mhi_dev = mhi_chan->mhi_dev;

		if (mhi_dev)
			mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
	} else {
		tasklet_schedule(&mhi_event->task);
	}

	return IRQ_HANDLED;
}

irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
{
	struct mhi_controller *mhi_cntrl = priv;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state = MHI_STATE_MAX;
	enum mhi_pm_state pm_state = 0;
	enum mhi_ee_type ee = 0;

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		goto exit_intvec;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	ee = mhi_cntrl->ee;
	mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
	dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
		TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
		TO_MHI_STATE_STR(state));

	if (state == MHI_STATE_SYS_ERR) {
		dev_dbg(dev, "System error detected\n");
		pm_state = mhi_tryset_pm_state(mhi_cntrl,
					       MHI_PM_SYS_ERR_DETECT);
	}
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* If the device supports RDDM, don't bother processing SYS error */
	if (mhi_cntrl->rddm_image) {
		/* host may be performing a device power down already */
		if (!mhi_is_active(mhi_cntrl))
			goto exit_intvec;

		if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
			wake_up_all(&mhi_cntrl->state_event);
		}
		goto exit_intvec;
	}

	if (pm_state == MHI_PM_SYS_ERR_DETECT) {
		wake_up_all(&mhi_cntrl->state_event);

		/* For fatal errors, we let the controller decide next step */
		if (MHI_IN_PBL(ee))
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
		else
			mhi_pm_sys_err_handler(mhi_cntrl);
	}

exit_intvec:

	return IRQ_HANDLED;
}

irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
{
	struct mhi_controller *mhi_cntrl = dev;

	/* Wake up events waiting for state change */
	wake_up_all(&mhi_cntrl->state_event);

	return IRQ_WAKE_THREAD;
}

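/*
 * After an event has been processed, advance both the local read/write
 * pointers of the event ring and the write pointer in the shared event ring
 * context, wrapping back to the ring base when the end is reached.
 */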
static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
					struct mhi_ring *ring)
{
	dma_addr_t ctxt_wp;

	/* Update the WP */
	ring->wp += ring->el_size;
	ctxt_wp = *ring->ctxt_wp + ring->el_size;

	if (ring->wp >= (ring->base + ring->len)) {
		ring->wp = ring->base;
		ctxt_wp = ring->iommu_base;
	}

	*ring->ctxt_wp = ctxt_wp;

	/* Update the RP */
	ring->rp += ring->el_size;
	if (ring->rp >= (ring->base + ring->len))
		ring->rp = ring->base;

	/* Update to all cores */
	smp_wmb();
}

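/*
 * Handle a transfer completion event for a channel: complete every TRE up to
 * the one the event points at, unmap buffers, invoke the client's xfer_cb()
 * and, for pre-allocated channels, re-queue the buffer back to the device.
 */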
static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
			    struct mhi_tre *event,
			    struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_result result;
	unsigned long flags = 0;
	u32 ev_code;

	ev_code = MHI_TRE_GET_EV_CODE(event);
	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
		-EOVERFLOW : 0;

	/*
	 * If this is a DB event, we need to grab the channel lock with
	 * preemption disabled and as a writer, because we may have to update
	 * the doorbell register and another thread could be doing the same.
	 */
	if (ev_code >= MHI_EV_CC_OOB)
		write_lock_irqsave(&mhi_chan->lock, flags);
	else
		read_lock_bh(&mhi_chan->lock);

	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
		goto end_process_tx_event;

	switch (ev_code) {
	case MHI_EV_CC_OVERFLOW:
	case MHI_EV_CC_EOB:
	case MHI_EV_CC_EOT:
	{
		dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
		struct mhi_tre *local_rp, *ev_tre;
		void *dev_rp;
		struct mhi_buf_info *buf_info;
		u16 xfer_len;

		/* Get the TRB this event points to */
		ev_tre = mhi_to_virtual(tre_ring, ptr);

		dev_rp = ev_tre + 1;
		if (dev_rp >= (tre_ring->base + tre_ring->len))
			dev_rp = tre_ring->base;

		result.dir = mhi_chan->dir;

		local_rp = tre_ring->rp;
		while (local_rp != dev_rp) {
			buf_info = buf_ring->rp;
			/* If it's the last TRE, get length from the event */
			if (local_rp == ev_tre)
				xfer_len = MHI_TRE_GET_EV_LEN(event);
			else
				xfer_len = buf_info->len;

			/* Unmap if it's not pre-mapped by client */
			if (likely(!buf_info->pre_mapped))
				mhi_cntrl->unmap_single(mhi_cntrl, buf_info);

			result.buf_addr = buf_info->cb_buf;

			/* truncate to buf len if xfer_len is larger */
			result.bytes_xferd =
				min_t(u16, xfer_len, buf_info->len);
			mhi_del_ring_element(mhi_cntrl, buf_ring);
			mhi_del_ring_element(mhi_cntrl, tre_ring);
			local_rp = tre_ring->rp;

			/* notify client */
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

			if (mhi_chan->dir == DMA_TO_DEVICE)
				atomic_dec(&mhi_cntrl->pending_pkts);

			/*
			 * Recycle the buffer if it is pre-allocated; if there
			 * is an error, not much we can do apart from dropping
			 * the packet
			 */
			if (mhi_chan->pre_alloc) {
				if (mhi_queue_buf(mhi_chan->mhi_dev,
						  mhi_chan->dir,
						  buf_info->cb_buf,
						  buf_info->len, MHI_EOT)) {
					dev_err(dev,
						"Error recycling buffer for chan:%d\n",
						mhi_chan->chan);
					kfree(buf_info->cb_buf);
				}
			}
		}
		break;
	} /* CC_EOT */
	case MHI_EV_CC_OOB:
	case MHI_EV_CC_DB_MODE:
	{
		unsigned long flags;

		mhi_chan->db_cfg.db_mode = 1;
		read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
		if (tre_ring->wp != tre_ring->rp &&
		    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		}
		read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
		break;
	}
	case MHI_EV_CC_BAD_TRE:
	default:
		dev_err(dev, "Unknown event 0x%x\n", ev_code);
		break;
	} /* switch(MHI_EV_READ_CODE(EV_TRB_CODE, event)) */

end_process_tx_event:
	if (ev_code >= MHI_EV_CC_OOB)
		write_unlock_irqrestore(&mhi_chan->lock, flags);
	else
		read_unlock_bh(&mhi_chan->lock);

	return 0;
}

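/*
 * Handle an RSC transfer completion event: the event carries a cookie that is
 * the offset of the buffer descriptor within the buffer ring, so look it up
 * and report the result to the client.
 */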
static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
			   struct mhi_tre *event,
			   struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_buf_info *buf_info;
	struct mhi_result result;
	int ev_code;
	u32 cookie; /* offset to local descriptor */
	u16 xfer_len;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	ev_code = MHI_TRE_GET_EV_CODE(event);
	cookie = MHI_TRE_GET_EV_COOKIE(event);
	xfer_len = MHI_TRE_GET_EV_LEN(event);

	/* Received out of bound cookie */
	WARN_ON(cookie >= buf_ring->len);

	buf_info = buf_ring->base + cookie;

	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
		-EOVERFLOW : 0;

	/* truncate to buf len if xfer_len is larger */
	result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
	result.buf_addr = buf_info->cb_buf;
	result.dir = mhi_chan->dir;

	read_lock_bh(&mhi_chan->lock);

	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
		goto end_process_rsc_event;

	WARN_ON(!buf_info->used);

	/* notify the client */
	mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

	/*
	 * Note: We're arbitrarily incrementing RP even though the completion
	 * packet we just processed may not correspond to this descriptor.
	 * We can do this because the device is guaranteed to cache
	 * descriptors in the order it receives them, so even though the
	 * completion event is for a different descriptor, all descriptors
	 * in between can be reused.
	 * For example:
	 * Transfer Ring has descriptors: A, B, C, D
	 * The last descriptor the host queued is D (WP) and the first
	 * descriptor is A (RP).
	 * The completion event we just serviced is for descriptor C.
	 * Then we can safely queue descriptors to replace A, B, and C
	 * even though the host did not receive completions for them.
	 */
	mhi_del_ring_element(mhi_cntrl, tre_ring);
	buf_info->used = false;

end_process_rsc_event:
	read_unlock_bh(&mhi_chan->lock);

	return 0;
}

static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
				       struct mhi_tre *tre)
{
	dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
	struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
	struct mhi_ring *mhi_ring = &cmd_ring->ring;
	struct mhi_tre *cmd_pkt;
	struct mhi_chan *mhi_chan;
	u32 chan;

	cmd_pkt = mhi_to_virtual(mhi_ring, ptr);

	chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
	mhi_chan = &mhi_cntrl->mhi_chan[chan];
	write_lock_bh(&mhi_chan->lock);
	mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
	complete(&mhi_chan->completion);
	write_unlock_bh(&mhi_chan->lock);

	mhi_del_ring_element(mhi_cntrl, mhi_ring);
}

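/*
 * Process events on the control event ring: bandwidth requests, device state
 * changes, command completions, execution environment changes and any
 * transfer completions routed to this ring.
 */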
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event,
			     u32 event_quota)
{
	struct mhi_tre *dev_rp, *local_rp;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 chan;
	int count = 0;

	/*
	 * This is a quick check to avoid unnecessary event processing
	 * in case MHI is already in error state, but it's still possible
	 * to transition to error state while processing events
	 */
	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
		return -EIO;

	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
	local_rp = ev_ring->rp;

	while (dev_rp != local_rp) {
		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);

		switch (type) {
		case MHI_PKT_TYPE_BW_REQ_EVENT:
		{
			struct mhi_link_info *link_info;

			link_info = &mhi_cntrl->mhi_link_info;
			write_lock_irq(&mhi_cntrl->pm_lock);
			link_info->target_link_speed =
				MHI_TRE_GET_EV_LINKSPEED(local_rp);
			link_info->target_link_width =
				MHI_TRE_GET_EV_LINKWIDTH(local_rp);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			dev_dbg(dev, "Received BW_REQ event\n");
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
			break;
		}
		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
		{
			enum mhi_state new_state;

			new_state = MHI_TRE_GET_EV_STATE(local_rp);

			dev_dbg(dev, "State change event to state: %s\n",
				TO_MHI_STATE_STR(new_state));

			switch (new_state) {
			case MHI_STATE_M0:
				mhi_pm_m0_transition(mhi_cntrl);
				break;
			case MHI_STATE_M1:
				mhi_pm_m1_transition(mhi_cntrl);
				break;
			case MHI_STATE_M3:
				mhi_pm_m3_transition(mhi_cntrl);
				break;
			case MHI_STATE_SYS_ERR:
			{
				enum mhi_pm_state new_state;

				dev_dbg(dev, "System error detected\n");
				write_lock_irq(&mhi_cntrl->pm_lock);
				new_state = mhi_tryset_pm_state(mhi_cntrl,
							MHI_PM_SYS_ERR_DETECT);
				write_unlock_irq(&mhi_cntrl->pm_lock);
				if (new_state == MHI_PM_SYS_ERR_DETECT)
					mhi_pm_sys_err_handler(mhi_cntrl);
				break;
			}
			default:
				dev_err(dev, "Invalid state: %s\n",
					TO_MHI_STATE_STR(new_state));
			}

			break;
		}
		case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
			mhi_process_cmd_completion(mhi_cntrl, local_rp);
			break;
		case MHI_PKT_TYPE_EE_EVENT:
		{
			enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
			enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);

			dev_dbg(dev, "Received EE event: %s\n",
				TO_MHI_EXEC_STR(event));
			switch (event) {
			case MHI_EE_SBL:
				st = DEV_ST_TRANSITION_SBL;
				break;
			case MHI_EE_WFW:
			case MHI_EE_AMSS:
				st = DEV_ST_TRANSITION_MISSION_MODE;
				break;
			case MHI_EE_RDDM:
				mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
				write_lock_irq(&mhi_cntrl->pm_lock);
				mhi_cntrl->ee = event;
				write_unlock_irq(&mhi_cntrl->pm_lock);
				wake_up_all(&mhi_cntrl->state_event);
				break;
			default:
				dev_err(dev,
					"Unhandled EE event: 0x%x\n", type);
			}
			if (st != DEV_ST_TRANSITION_MAX)
				mhi_queue_state_transition(mhi_cntrl, st);

			break;
		}
		case MHI_PKT_TYPE_TX_EVENT:
			chan = MHI_TRE_GET_EV_CHID(local_rp);

			WARN_ON(chan >= mhi_cntrl->max_chan);

			/*
			 * Only process the event ring elements whose channel
			 * ID is within the maximum supported range.
			 */
			if (chan < mhi_cntrl->max_chan) {
				mhi_chan = &mhi_cntrl->mhi_chan[chan];
				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
				event_quota--;
			}
			break;
		default:
			dev_err(dev, "Unhandled event type: %d\n", type);
			break;
		}

		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
		local_rp = ev_ring->rp;
		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
		count++;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return count;
}

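/*
 * Process transfer completion events on a data event ring, bounded by
 * event_quota; returns the number of events consumed.
 */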
int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
				struct mhi_event *mhi_event,
				u32 event_quota)
{
	struct mhi_tre *dev_rp, *local_rp;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	int count = 0;
	u32 chan;
	struct mhi_chan *mhi_chan;

	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
		return -EIO;

	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
	local_rp = ev_ring->rp;

	while (dev_rp != local_rp && event_quota > 0) {
		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);

		chan = MHI_TRE_GET_EV_CHID(local_rp);

		WARN_ON(chan >= mhi_cntrl->max_chan);

		/*
		 * Only process the event ring elements whose channel
		 * ID is within the maximum supported range.
		 */
		if (chan < mhi_cntrl->max_chan) {
			mhi_chan = &mhi_cntrl->mhi_chan[chan];

			if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
				event_quota--;
			} else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
				parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
				event_quota--;
			}
		}

		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
		local_rp = ev_ring->rp;
		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
		count++;
	}
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return count;
}

void mhi_ev_task(unsigned long data)
{
	struct mhi_event *mhi_event = (struct mhi_event *)data;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;

	/* process all pending events */
	spin_lock_bh(&mhi_event->lock);
	mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
	spin_unlock_bh(&mhi_event->lock);
}

void mhi_ctrl_ev_task(unsigned long data)
{
	struct mhi_event *mhi_event = (struct mhi_event *)data;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	enum mhi_pm_state pm_state = 0;
	int ret;

	/*
	 * We can check the PM state w/o a lock here because there is no way
	 * the PM state can change from reg access valid to no access while
	 * this thread is executing.
	 */
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		/*
		 * We may have a pending event but are not allowed to
		 * process it since we are probably in a suspended state,
		 * so trigger a resume.
		 */
		mhi_trigger_resume(mhi_cntrl);

		return;
	}

	/* Process ctrl events */
	ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);

	/*
	 * We received an IRQ but no events to process, maybe the device went
	 * to SYS_ERR state? Check the state to confirm.
	 */
	if (!ret) {
		write_lock_irq(&mhi_cntrl->pm_lock);
		state = mhi_get_mhi_state(mhi_cntrl);
		if (state == MHI_STATE_SYS_ERR) {
			dev_dbg(dev, "System error detected\n");
			pm_state = mhi_tryset_pm_state(mhi_cntrl,
						       MHI_PM_SYS_ERR_DETECT);
		}
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (pm_state == MHI_PM_SYS_ERR_DETECT)
			mhi_pm_sys_err_handler(mhi_cntrl);
	}
}

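/*
 * A ring is considered full when advancing the write pointer by one element
 * would make it catch up with the read pointer.
 */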
static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
			     struct mhi_ring *ring)
{
	void *tmp = ring->wp + ring->el_size;

	if (tmp >= (ring->base + ring->len))
		tmp = ring->base;

	return (tmp == ring->rp);
}

int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
	struct mhi_buf_info buf_info = { };
	int ret;

	/* If MHI host pre-allocates buffers then client drivers cannot queue */
	if (mhi_chan->pre_alloc)
		return -EINVAL;

	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
		return -ENOMEM;

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	/* we're in M3 or transitioning to M3 */
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	/* Toggle wake to exit out of M2 */
	mhi_cntrl->wake_toggle(mhi_cntrl);

	buf_info.v_addr = skb->data;
	buf_info.cb_buf = skb;
	buf_info.len = len;

	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
	if (unlikely(ret)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return ret;
	}

	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
		read_lock_bh(&mhi_chan->lock);
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_bh(&mhi_chan->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);

int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
	struct mhi_buf_info buf_info = { };
	int ret;

	/* If MHI host pre-allocates buffers then client drivers cannot queue */
	if (mhi_chan->pre_alloc)
		return -EINVAL;

	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
		return -ENOMEM;

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
		dev_err(dev, "MHI is not in active state, PM state: %s\n",
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		read_unlock_bh(&mhi_cntrl->pm_lock);

		return -EIO;
	}

	/* we're in M3 or transitioning to M3 */
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	/* Toggle wake to exit out of M2 */
	mhi_cntrl->wake_toggle(mhi_cntrl);

	buf_info.p_addr = mhi_buf->dma_addr;
	buf_info.cb_buf = mhi_buf;
	buf_info.pre_mapped = true;
	buf_info.len = len;

	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
	if (unlikely(ret)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return ret;
	}

	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
		read_lock_bh(&mhi_chan->lock);
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_bh(&mhi_chan->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_queue_dma);

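/*
 * Fill in the next TRE on the channel's transfer ring from the buffer info,
 * map the buffer if the client did not pre-map it, and advance both the
 * transfer ring and the buffer ring. The caller rings the channel doorbell.
 */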
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		struct mhi_buf_info *info, enum mhi_flags flags)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_tre *mhi_tre;
	struct mhi_buf_info *buf_info;
	int eot, eob, chain, bei;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	buf_info = buf_ring->wp;
	WARN_ON(buf_info->used);
	buf_info->pre_mapped = info->pre_mapped;
	if (info->pre_mapped)
		buf_info->p_addr = info->p_addr;
	else
		buf_info->v_addr = info->v_addr;
	buf_info->cb_buf = info->cb_buf;
	buf_info->wp = tre_ring->wp;
	buf_info->dir = mhi_chan->dir;
	buf_info->len = info->len;

	if (!info->pre_mapped) {
		ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
		if (ret)
			return ret;
	}

	eob = !!(flags & MHI_EOB);
	eot = !!(flags & MHI_EOT);
	chain = !!(flags & MHI_CHAIN);
	bei = !!(mhi_chan->intmod);

	mhi_tre = tre_ring->wp;
	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);

	/* increment WP */
	mhi_add_ring_element(mhi_cntrl, tre_ring);
	mhi_add_ring_element(mhi_cntrl, buf_ring);

	return 0;
}

int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  void *buf, size_t len, enum mhi_flags mflags)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct mhi_ring *tre_ring;
	struct mhi_buf_info buf_info = { };
	unsigned long flags;
	int ret;

	/*
	 * This check is only a guard; MHI can still enter an error state
	 * while the rest of the function executes, but that is not fatal,
	 * so we do not need to hold pm_lock here.
	 */
	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
		return -EIO;

	tre_ring = &mhi_chan->tre_ring;
	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
		return -ENOMEM;

	buf_info.v_addr = buf;
	buf_info.cb_buf = buf;
	buf_info.len = len;

	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
	if (unlikely(ret))
		return ret;

	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);

	/* we're in M3 or transitioning to M3 */
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	/* Toggle wake to exit out of M2 */
	mhi_cntrl->wake_toggle(mhi_cntrl);

	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
		unsigned long flags;

		read_lock_irqsave(&mhi_chan->lock, flags);
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irqrestore(&mhi_chan->lock, flags);
	}

	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);

bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
					mhi_dev->ul_chan : mhi_dev->dl_chan;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

	return mhi_is_ring_full(mhi_cntrl, tre_ring);
}
EXPORT_SYMBOL_GPL(mhi_queue_is_full);

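/*
 * Queue a channel START or RESET command on the primary command ring and ring
 * the command doorbell; completion is reported through the channel's
 * completion object from mhi_process_cmd_completion().
 */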
int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
		 struct mhi_chan *mhi_chan,
		 enum mhi_cmd_type cmd)
{
	struct mhi_tre *cmd_tre = NULL;
	struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
	struct mhi_ring *ring = &mhi_cmd->ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int chan = 0;

	if (mhi_chan)
		chan = mhi_chan->chan;

	spin_lock_bh(&mhi_cmd->lock);
	if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
		spin_unlock_bh(&mhi_cmd->lock);
		return -ENOMEM;
	}

	/* prepare the cmd tre */
	cmd_tre = ring->wp;
	switch (cmd) {
	case MHI_CMD_RESET_CHAN:
		cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
		cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
		cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
		break;
	case MHI_CMD_START_CHAN:
		cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
		cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
		cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
		break;
	default:
		dev_err(dev, "Command not supported\n");
		break;
	}

	/* queue to hardware */
	mhi_add_ring_element(mhi_cntrl, ring);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	spin_unlock_bh(&mhi_cmd->lock);

	return 0;
}

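/*
 * Move the channel to the DISABLED state, issue MHI_CMD_RESET_CHAN and clean
 * up the ring contexts. The rings are reset even if the command times out or
 * the device is already in an error state.
 */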
static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
				    struct mhi_chan *mhi_chan)
{
	int ret;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan);

	/* no more processing events for this channel */
	mutex_lock(&mhi_chan->mutex);
	write_lock_irq(&mhi_chan->lock);
	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
	    mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
		write_unlock_irq(&mhi_chan->lock);
		mutex_unlock(&mhi_chan->mutex);
		return;
	}

	mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
	write_unlock_irq(&mhi_chan->lock);

	reinit_completion(&mhi_chan->completion);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		goto error_invalid_state;
	}

	mhi_cntrl->wake_toggle(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	mhi_cntrl->runtime_get(mhi_cntrl);
	mhi_cntrl->runtime_put(mhi_cntrl);
	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
	if (ret)
		goto error_invalid_state;

	/* even if it fails we will still reset */
	ret = wait_for_completion_timeout(&mhi_chan->completion,
					  msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
		dev_err(dev,
			"Failed to receive cmd completion, still resetting\n");

error_invalid_state:
	if (!mhi_chan->offload_ch) {
		mhi_reset_chan(mhi_cntrl, mhi_chan);
		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
	}
	dev_dbg(dev, "chan:%d successfully reset\n", mhi_chan->chan);
	mutex_unlock(&mhi_chan->mutex);
}

int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
			struct mhi_chan *mhi_chan)
{
	int ret = 0;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan);

	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
		dev_err(dev,
			"Current EE: %s Required EE Mask: 0x%x for chan: %s\n",
			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
			mhi_chan->name);
		return -ENOTCONN;
	}

	mutex_lock(&mhi_chan->mutex);

	/* If the channel is not in the disabled state, do not allow it to start */
	if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
		ret = -EIO;
		dev_dbg(dev, "channel: %d is not in disabled state\n",
			mhi_chan->chan);
		goto error_init_chan;
	}

	/* For offload channels, the client manages the channel context */
	if (!mhi_chan->offload_ch) {
		ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
		if (ret)
			goto error_init_chan;
	}

	reinit_completion(&mhi_chan->completion);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		ret = -EIO;
		goto error_pm_state;
	}

	mhi_cntrl->wake_toggle(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->runtime_get(mhi_cntrl);
	mhi_cntrl->runtime_put(mhi_cntrl);

	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
	if (ret)
		goto error_pm_state;

	ret = wait_for_completion_timeout(&mhi_chan->completion,
				msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
		ret = -EIO;
		goto error_pm_state;
	}

	write_lock_irq(&mhi_chan->lock);
	mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
	write_unlock_irq(&mhi_chan->lock);

	/* Pre-allocate buffers for the xfer ring */
	if (mhi_chan->pre_alloc) {
		int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
						       &mhi_chan->tre_ring);
		size_t len = mhi_cntrl->buffer_len;

		while (nr_el--) {
			void *buf;
			struct mhi_buf_info info = { };

			buf = kmalloc(len, GFP_KERNEL);
			if (!buf) {
				ret = -ENOMEM;
				goto error_pre_alloc;
			}

			/* Prepare transfer descriptors */
			info.v_addr = buf;
			info.cb_buf = buf;
			info.len = len;
			ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
			if (ret) {
				kfree(buf);
				goto error_pre_alloc;
			}
		}

		read_lock_bh(&mhi_cntrl->pm_lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
			read_lock_irq(&mhi_chan->lock);
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
			read_unlock_irq(&mhi_chan->lock);
		}
		read_unlock_bh(&mhi_cntrl->pm_lock);
	}

	mutex_unlock(&mhi_chan->mutex);

	dev_dbg(dev, "Chan: %d successfully moved to start state\n",
		mhi_chan->chan);

	return 0;

error_pm_state:
	if (!mhi_chan->offload_ch)
		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

error_init_chan:
	mutex_unlock(&mhi_chan->mutex);

	return ret;

error_pre_alloc:
	mutex_unlock(&mhi_chan->mutex);
	__mhi_unprepare_channel(mhi_cntrl, mhi_chan);

	return ret;
}

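/*
 * Walk the pending events on the channel's event ring and retag any TX events
 * that belong to this channel as STALE so they are skipped later.
 */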
static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
				  struct mhi_event *mhi_event,
				  struct mhi_event_ctxt *er_ctxt,
				  int chan)
{
	struct mhi_tre *dev_rp, *local_rp;
	struct mhi_ring *ev_ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	unsigned long flags;

	dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);

	ev_ring = &mhi_event->ring;

	/* mark all stale events related to channel as STALE event */
	spin_lock_irqsave(&mhi_event->lock, flags);
	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);

	local_rp = ev_ring->rp;
	while (dev_rp != local_rp) {
		if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
		    chan == MHI_TRE_GET_EV_CHID(local_rp))
			local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
					MHI_PKT_TYPE_STALE_EVENT);
		local_rp++;
		if (local_rp == (ev_ring->base + ev_ring->len))
			local_rp = ev_ring->base;
	}

	dev_dbg(dev, "Finished marking events as stale events\n");
	spin_unlock_irqrestore(&mhi_event->lock, flags);
}

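/*
 * Flush every buffer still queued on the channel: pre-allocated buffers are
 * freed, client buffers are returned through xfer_cb() with -ENOTCONN.
 */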
static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
				struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_result result;

	/* Reset any pending buffers */
	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	result.transaction_status = -ENOTCONN;
	result.bytes_xferd = 0;
	while (tre_ring->rp != tre_ring->wp) {
		struct mhi_buf_info *buf_info = buf_ring->rp;

		if (mhi_chan->dir == DMA_TO_DEVICE)
			atomic_dec(&mhi_cntrl->pending_pkts);

		if (!buf_info->pre_mapped)
			mhi_cntrl->unmap_single(mhi_cntrl, buf_info);

		mhi_del_ring_element(mhi_cntrl, buf_ring);
		mhi_del_ring_element(mhi_cntrl, tre_ring);

		if (mhi_chan->pre_alloc) {
			kfree(buf_info->cb_buf);
		} else {
			result.buf_addr = buf_info->cb_buf;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}
	}
}

void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
{
	struct mhi_event *mhi_event;
	struct mhi_event_ctxt *er_ctxt;
	int chan = mhi_chan->chan;

	/* Nothing to reset, client doesn't queue buffers */
	if (mhi_chan->offload_ch)
		return;

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
	er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];

	mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);

	mhi_reset_data_chan(mhi_cntrl, mhi_chan);

	read_unlock_bh(&mhi_cntrl->pm_lock);
}

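/*
 * Client drivers typically call mhi_prepare_for_transfer() from their probe
 * path and mhi_unprepare_from_transfer() from remove to bring the UL/DL
 * channel pair in and out of the running state.
 */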
/* Move channel to start state */
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
{
	int ret, dir;
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;

	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
		if (!mhi_chan)
			continue;

		ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
		if (ret)
			goto error_open_chan;
	}

	return 0;

error_open_chan:
	for (--dir; dir >= 0; dir--) {
		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
		if (!mhi_chan)
			continue;

		__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);

void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	int dir;

	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
		if (!mhi_chan)
			continue;

		__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
	}
}
EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);

int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
	struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
	int ret;

	spin_lock_bh(&mhi_event->lock);
	ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
	spin_unlock_bh(&mhi_event->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_poll);