// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "internal.h"
#include "trace.h"
int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *base, u32 offset, u32 *out)
{
	return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
}

int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset,
				    u32 mask, u32 *out)
{
	u32 tmp;
	int ret;

	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
	if (ret)
		return ret;

	*out = (tmp & mask) >> __ffs(mask);

	return 0;
}
int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset,
				    u32 mask, u32 val, u32 delayus,
				    u32 timeout_ms)
{
	int ret;
	u32 out, retry = (timeout_ms * 1000) / delayus;

	while (retry--) {
		ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
		if (ret)
			return ret;

		if (out == val)
			return 0;

		fsleep(delayus);
	}

	return -ETIMEDOUT;
}
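
/*
 * Worked example (illustrative): with timeout_ms = 1000 and delayus = 25000,
 * retry = (1000 * 1000) / 25000 = 40, i.e. roughly forty ~25 ms polls of the
 * register field before giving up with -ETIMEDOUT.
 */
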
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
		   u32 offset, u32 val)
{
	mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
}

int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
				     void __iomem *base, u32 offset, u32 mask,
				     u32 val)
{
	int ret;
	u32 tmp;

	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
	if (ret)
		return ret;

	tmp &= ~mask;
	tmp |= (val << __ffs(mask));
	mhi_write_reg(mhi_cntrl, base, offset, tmp);

	return 0;
}
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
		  dma_addr_t db_val)
{
	mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
	mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
}
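
/*
 * Illustrative note: a 64-bit doorbell value such as 0x0000000123456780 is
 * written as two 32-bit MMIO accesses, 0x00000001 at db_addr + 4 and
 * 0x23456780 at db_addr + 0. Writing the upper half first is assumed here to
 * matter because the device typically latches the doorbell on the low-dword
 * write.
 */
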
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
		     struct db_cfg *db_cfg,
		     void __iomem *db_addr,
		     dma_addr_t db_val)
{
	if (db_cfg->db_mode) {
		db_cfg->db_val = db_val;
		mhi_write_db(mhi_cntrl, db_addr, db_val);
		db_cfg->db_mode = 0;
	}
}

void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
			     struct db_cfg *db_cfg,
			     void __iomem *db_addr,
			     dma_addr_t db_val)
{
	db_cfg->db_val = db_val;
	mhi_write_db(mhi_cntrl, db_addr, db_val);
}
void mhi_ring_er_db(struct mhi_event *mhi_event)
{
	struct mhi_ring *ring = &mhi_event->ring;

	mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
				     ring->db_addr, le64_to_cpu(*ring->ctxt_wp));
}

void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
{
	dma_addr_t db;
	struct mhi_ring *ring = &mhi_cmd->ring;

	db = ring->iommu_base + (ring->wp - ring->base);
	*ring->ctxt_wp = cpu_to_le64(db);
	mhi_write_db(mhi_cntrl, ring->db_addr, db);
}
void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
		      struct mhi_chan *mhi_chan)
{
	struct mhi_ring *ring = &mhi_chan->tre_ring;
	dma_addr_t db;

	db = ring->iommu_base + (ring->wp - ring->base);

	/*
	 * Writes to the new ring element must be visible to the hardware
	 * before letting h/w know there is new element to fetch.
	 */
	dma_wmb();
	*ring->ctxt_wp = cpu_to_le64(db);

	mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
				    ring->db_addr, db);
}
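
/*
 * Note on the doorbell value above (illustrative): db is the IOMMU/DMA address
 * corresponding to the current host write pointer, i.e. iommu_base plus the
 * byte offset of wp within the ring. The device may then fetch ring elements
 * up to, but not including, that address.
 */
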
enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
{
	u32 exec;
	int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);

	return (ret) ? MHI_EE_MAX : exec;
}
EXPORT_SYMBOL_GPL(mhi_get_exec_env);

enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
{
	u32 state;
	int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				     MHISTATUS_MHISTATE_MASK, &state);

	return ret ? MHI_STATE_MAX : state;
}
EXPORT_SYMBOL_GPL(mhi_get_mhi_state);

void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->reset) {
		mhi_cntrl->reset(mhi_cntrl);
		return;
	}

	/* Generic MHI SoC reset */
	mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
		      MHI_SOC_RESET_REQ);
}
EXPORT_SYMBOL_GPL(mhi_soc_reset);
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
			 struct mhi_buf_info *buf_info)
{
	buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
					  buf_info->v_addr, buf_info->len,
					  buf_info->dir);
	if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
		return -ENOMEM;

	return 0;
}

int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf_info)
{
	void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
				       &buf_info->p_addr, GFP_ATOMIC);

	if (!buf)
		return -ENOMEM;

	if (buf_info->dir == DMA_TO_DEVICE)
		memcpy(buf, buf_info->v_addr, buf_info->len);

	buf_info->bb_addr = buf;

	return 0;
}

void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
			    struct mhi_buf_info *buf_info)
{
	dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
			 buf_info->dir);
}

void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf_info)
{
	if (buf_info->dir == DMA_FROM_DEVICE)
		memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);

	dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
			  buf_info->bb_addr, buf_info->p_addr);
}
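
/*
 * Worked example (illustrative) for the free-element accounting below: assume
 * a ring with el_size = 16 and len = 64 (four elements) at base B. One slot is
 * always left unused so that a full ring can be told apart from an empty one.
 *
 *   rp = B + 48, wp = B + 16  ->  ((48 - 16) / 16) - 1 = 1 free element
 *   rp = B + 16, wp = B + 48  ->  (16 / 16) + ((64 - 48) / 16) - 1 = 1 free
 */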
static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
				      struct mhi_ring *ring)
{
	int nr_el;

	if (ring->wp < ring->rp) {
		nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
	} else {
		nr_el = (ring->rp - ring->base) / ring->el_size;
		nr_el += ((ring->base + ring->len - ring->wp) /
			  ring->el_size) - 1;
	}

	return nr_el;
}

static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
{
	return (addr - ring->iommu_base) + ring->base;
}
static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
				 struct mhi_ring *ring)
{
	ring->wp += ring->el_size;
	if (ring->wp >= (ring->base + ring->len))
		ring->wp = ring->base;
	/* smp update */
	smp_wmb();
}

static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
				 struct mhi_ring *ring)
{
	ring->rp += ring->el_size;
	if (ring->rp >= (ring->base + ring->len))
		ring->rp = ring->base;
	/* smp update */
	smp_wmb();
}

static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
{
	return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len &&
		!(addr & (sizeof(struct mhi_ring_element) - 1));
}
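
/*
 * Ring convention used throughout this file (summary): wp is the producer
 * pointer and rp the consumer pointer, both wrapping at base + len. A ring is
 * empty when rp == wp and treated as full when advancing wp would land on rp.
 * is_valid_ring_ptr() additionally rejects device-supplied addresses that fall
 * outside [iommu_base, iommu_base + len) or are not aligned to the ring
 * element size.
 */
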
int mhi_destroy_device(struct device *dev, void *data)
{
	struct mhi_chan *ul_chan, *dl_chan;
	struct mhi_device *mhi_dev;
	struct mhi_controller *mhi_cntrl;
	enum mhi_ee_type ee = MHI_EE_MAX;

	if (dev->bus != &mhi_bus_type)
		return 0;

	mhi_dev = to_mhi_device(dev);
	mhi_cntrl = mhi_dev->mhi_cntrl;

	/* Only destroy virtual devices that are attached to the bus */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	ul_chan = mhi_dev->ul_chan;
	dl_chan = mhi_dev->dl_chan;

	/*
	 * If an execution environment is specified, remove only those devices
	 * that started in it, based on the ee_mask of the channels, as we move
	 * on to a different execution environment.
	 */
	if (data)
		ee = *(enum mhi_ee_type *)data;

	/*
	 * For the suspend and resume case, this function will get called
	 * without mhi_unregister_controller(). Hence, we need to drop the
	 * references to mhi_dev created for ul and dl channels. We can
	 * be sure that there will be no instances of mhi_dev left after
	 * this.
	 */
	if (ul_chan) {
		if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
			return 0;

		put_device(&ul_chan->mhi_dev->dev);
	}

	if (dl_chan) {
		if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
			return 0;

		put_device(&dl_chan->mhi_dev->dev);
	}

	dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
		mhi_dev->name);

	/* Notify the client and remove the device from MHI bus */
	device_del(dev);
	put_device(dev);

	return 0;
}
int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
			    enum dma_data_direction dir)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
		mhi_dev->ul_chan : mhi_dev->dl_chan;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

	return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
}
EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);

void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
	struct mhi_driver *mhi_drv;

	if (!mhi_dev->dev.driver)
		return;

	mhi_drv = to_mhi_driver(mhi_dev->dev.driver);

	if (mhi_drv->status_cb)
		mhi_drv->status_cb(mhi_dev, cb_reason);
}
EXPORT_SYMBOL_GPL(mhi_notify);
/* Bind MHI channels to MHI devices */
void mhi_create_devices(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *mhi_chan;
	struct mhi_device *mhi_dev;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->configured || mhi_chan->mhi_dev ||
		    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
			continue;
		mhi_dev = mhi_alloc_device(mhi_cntrl);
		if (IS_ERR(mhi_dev))
			return;

		mhi_dev->dev_type = MHI_DEVICE_XFER;
		switch (mhi_chan->dir) {
		case DMA_TO_DEVICE:
			mhi_dev->ul_chan = mhi_chan;
			mhi_dev->ul_chan_id = mhi_chan->chan;
			break;
		case DMA_FROM_DEVICE:
			/* We use dl_chan as offload channels */
			mhi_dev->dl_chan = mhi_chan;
			mhi_dev->dl_chan_id = mhi_chan->chan;
			break;
		default:
			dev_err(dev, "Direction not supported\n");
			put_device(&mhi_dev->dev);
			return;
		}

		get_device(&mhi_dev->dev);
		mhi_chan->mhi_dev = mhi_dev;

		/* Check next channel if it matches */
		if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
			if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
				i++;
				mhi_chan++;
				if (mhi_chan->dir == DMA_TO_DEVICE) {
					mhi_dev->ul_chan = mhi_chan;
					mhi_dev->ul_chan_id = mhi_chan->chan;
				} else {
					mhi_dev->dl_chan = mhi_chan;
					mhi_dev->dl_chan_id = mhi_chan->chan;
				}
				get_device(&mhi_dev->dev);
				mhi_chan->mhi_dev = mhi_dev;
			}
		}

		/* Channel name is same for both UL and DL */
		mhi_dev->name = mhi_chan->name;
		dev_set_name(&mhi_dev->dev, "%s_%s",
			     dev_name(&mhi_cntrl->mhi_dev->dev),
			     mhi_dev->name);

		/* Init wakeup source if available */
		if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
			device_init_wakeup(&mhi_dev->dev, true);

		ret = device_add(&mhi_dev->dev);
		if (ret)
			put_device(&mhi_dev->dev);
	}
}
irqreturn_t mhi_irq_handler(int irq_number, void *dev)
{
	struct mhi_event *mhi_event = dev;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	dma_addr_t ptr;
	void *dev_rp;

	/*
	 * If CONFIG_DEBUG_SHIRQ is set, the IRQ handler will get invoked during
	 * __free_irq() and by that time mhi_ctxt would've been freed. So check
	 * for the existence of mhi_ctxt before handling the IRQs.
	 */
	if (!mhi_cntrl->mhi_ctxt) {
		dev_dbg(&mhi_cntrl->mhi_dev->dev,
			"mhi_ctxt has been freed\n");
		return IRQ_HANDLED;
	}

	er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	ptr = le64_to_cpu(er_ctxt->rp);

	if (!is_valid_ring_ptr(ev_ring, ptr)) {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Event ring rp points outside of the event ring\n");
		return IRQ_HANDLED;
	}

	dev_rp = mhi_to_virtual(ev_ring, ptr);

	/* Only proceed if event ring has pending events */
	if (ev_ring->rp == dev_rp)
		return IRQ_HANDLED;

	/* For client managed event ring, notify pending data */
	if (mhi_event->cl_manage) {
		struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
		struct mhi_device *mhi_dev = mhi_chan->mhi_dev;

		if (mhi_dev)
			mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
	} else {
		tasklet_schedule(&mhi_event->task);
	}

	return IRQ_HANDLED;
}
irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
{
	struct mhi_controller *mhi_cntrl = priv;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	enum mhi_pm_state pm_state = 0;
	enum mhi_ee_type ee;

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		goto exit_intvec;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	ee = mhi_get_exec_env(mhi_cntrl);

	trace_mhi_intvec_states(mhi_cntrl, ee, state);
	if (state == MHI_STATE_SYS_ERR) {
		dev_dbg(dev, "System error detected\n");
		pm_state = mhi_tryset_pm_state(mhi_cntrl,
					       MHI_PM_SYS_ERR_DETECT);
	}
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (pm_state != MHI_PM_SYS_ERR_DETECT)
		goto exit_intvec;

	switch (ee) {
	case MHI_EE_RDDM:
		/* proceed if power down is not already in progress */
		if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
			mhi_cntrl->ee = ee;
			wake_up_all(&mhi_cntrl->state_event);
		}
		break;
	case MHI_EE_PBL:
	case MHI_EE_EDL:
	case MHI_EE_PTHRU:
		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
		mhi_cntrl->ee = ee;
		wake_up_all(&mhi_cntrl->state_event);
		mhi_pm_sys_err_handler(mhi_cntrl);
		break;
	default:
		wake_up_all(&mhi_cntrl->state_event);
		mhi_pm_sys_err_handler(mhi_cntrl);
		break;
	}

exit_intvec:

	return IRQ_HANDLED;
}
irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
{
	struct mhi_controller *mhi_cntrl = dev;

	/* Wake up events waiting for state change */
	wake_up_all(&mhi_cntrl->state_event);

	return IRQ_WAKE_THREAD;
}

static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
					struct mhi_ring *ring)
{
	/* Update the WP */
	ring->wp += ring->el_size;

	if (ring->wp >= (ring->base + ring->len))
		ring->wp = ring->base;

	*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base));

	/* Update the RP */
	ring->rp += ring->el_size;
	if (ring->rp >= (ring->base + ring->len))
		ring->rp = ring->base;

	/* Update to all cores */
	smp_wmb();
}
static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
			    struct mhi_ring_element *event,
			    struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_result result;
	unsigned long flags = 0;
	u32 ev_code;

	ev_code = MHI_TRE_GET_EV_CODE(event);
	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
		-EOVERFLOW : 0;

	/*
	 * If it's a DB Event then we need to grab the lock
	 * with preemption disabled and as a write because we
	 * have to update db register and there are chances that
	 * another thread could be doing the same.
	 */
	if (ev_code >= MHI_EV_CC_OOB)
		write_lock_irqsave(&mhi_chan->lock, flags);
	else
		read_lock_bh(&mhi_chan->lock);

	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
		goto end_process_tx_event;

	switch (ev_code) {
	case MHI_EV_CC_OVERFLOW:
	case MHI_EV_CC_EOB:
	case MHI_EV_CC_EOT:
	{
		dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
		struct mhi_ring_element *local_rp, *ev_tre;
		void *dev_rp;
		struct mhi_buf_info *buf_info;
		u16 xfer_len;

		if (!is_valid_ring_ptr(tre_ring, ptr)) {
			dev_err(&mhi_cntrl->mhi_dev->dev,
				"Event element points outside of the tre ring\n");
			break;
		}
		/* Get the TRB this event points to */
		ev_tre = mhi_to_virtual(tre_ring, ptr);

		dev_rp = ev_tre + 1;
		if (dev_rp >= (tre_ring->base + tre_ring->len))
			dev_rp = tre_ring->base;

		result.dir = mhi_chan->dir;

		local_rp = tre_ring->rp;
		while (local_rp != dev_rp) {
			buf_info = buf_ring->rp;
			/* If it's the last TRE, get length from the event */
			if (local_rp == ev_tre)
				xfer_len = MHI_TRE_GET_EV_LEN(event);
			else
				xfer_len = buf_info->len;

			/* Unmap if it's not pre-mapped by client */
			if (likely(!buf_info->pre_mapped))
				mhi_cntrl->unmap_single(mhi_cntrl, buf_info);

			result.buf_addr = buf_info->cb_buf;

			/* truncate to buf len if xfer_len is larger */
			result.bytes_xferd =
				min_t(u16, xfer_len, buf_info->len);
			mhi_del_ring_element(mhi_cntrl, buf_ring);
			mhi_del_ring_element(mhi_cntrl, tre_ring);
			local_rp = tre_ring->rp;

			read_unlock_bh(&mhi_chan->lock);

			/* notify client */
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

			if (mhi_chan->dir == DMA_TO_DEVICE) {
				atomic_dec(&mhi_cntrl->pending_pkts);
				/* Release the reference got from mhi_queue() */
				mhi_cntrl->runtime_put(mhi_cntrl);
			}

			/*
			 * Recycle the buffer if buffer is pre-allocated,
			 * if there is an error, not much we can do apart
			 * from dropping the packet
			 */
			if (mhi_chan->pre_alloc) {
				if (mhi_queue_buf(mhi_chan->mhi_dev,
						  mhi_chan->dir,
						  buf_info->cb_buf,
						  buf_info->len, MHI_EOT)) {
					dev_err(dev,
						"Error recycling buffer for chan:%d\n",
						mhi_chan->chan);
					kfree(buf_info->cb_buf);
				}
			}

			read_lock_bh(&mhi_chan->lock);
		}
		break;
	} /* CC_EOT */
	case MHI_EV_CC_OOB:
	case MHI_EV_CC_DB_MODE:
	{
		unsigned long pm_lock_flags;

		mhi_chan->db_cfg.db_mode = 1;
		read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
		if (tre_ring->wp != tre_ring->rp &&
		    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		}
		read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
		break;
	}
	case MHI_EV_CC_BAD_TRE:
	default:
		dev_err(dev, "Unknown event 0x%x\n", ev_code);
		break;
	} /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */

end_process_tx_event:
	if (ev_code >= MHI_EV_CC_OOB)
		write_unlock_irqrestore(&mhi_chan->lock, flags);
	else
		read_unlock_bh(&mhi_chan->lock);

	return 0;
}
static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
			   struct mhi_ring_element *event,
			   struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_buf_info *buf_info;
	struct mhi_result result;
	int ev_code;
	u32 cookie; /* offset to local descriptor */
	u16 xfer_len;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	ev_code = MHI_TRE_GET_EV_CODE(event);
	cookie = MHI_TRE_GET_EV_COOKIE(event);
	xfer_len = MHI_TRE_GET_EV_LEN(event);

	/* Received out of bound cookie */
	WARN_ON(cookie >= buf_ring->len);

	buf_info = buf_ring->base + cookie;

	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
		-EOVERFLOW : 0;

	/* truncate to buf len if xfer_len is larger */
	result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
	result.buf_addr = buf_info->cb_buf;
	result.dir = mhi_chan->dir;

	read_lock_bh(&mhi_chan->lock);

	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
		goto end_process_rsc_event;

	WARN_ON(!buf_info->used);

	/* notify the client */
	mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

	/*
	 * Note: the RP is advanced even though the completion event just
	 * processed might not correspond to the descriptor at RP. This is
	 * safe because the device is guaranteed to consume descriptors in the
	 * order it received them, so every descriptor up to the one that
	 * completed can be reused.
	 * ex:
	 * Transfer ring has descriptors: A, B, C, D
	 * The last descriptor the host queued is D (WP) and the first one
	 * still outstanding is A (RP).
	 * The completion event just serviced is for descriptor C.
	 * Then descriptors A, B and C can all be safely requeued, even though
	 * the host never received completions for A and B.
	 */
	mhi_del_ring_element(mhi_cntrl, tre_ring);
	buf_info->used = false;

end_process_rsc_event:
	read_unlock_bh(&mhi_chan->lock);

	return 0;
}
static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
				       struct mhi_ring_element *tre)
{
	dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
	struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
	struct mhi_ring *mhi_ring = &cmd_ring->ring;
	struct mhi_ring_element *cmd_pkt;
	struct mhi_chan *mhi_chan;
	u32 chan;

	if (!is_valid_ring_ptr(mhi_ring, ptr)) {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Event element points outside of the cmd ring\n");
		return;
	}

	cmd_pkt = mhi_to_virtual(mhi_ring, ptr);

	chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);

	if (chan < mhi_cntrl->max_chan &&
	    mhi_cntrl->mhi_chan[chan].configured) {
		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		write_lock_bh(&mhi_chan->lock);
		mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
		complete(&mhi_chan->completion);
		write_unlock_bh(&mhi_chan->lock);
	} else {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Completion packet for invalid channel ID: %d\n", chan);
	}

	mhi_del_ring_element(mhi_cntrl, mhi_ring);
}
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event,
			     u32 event_quota)
{
	struct mhi_ring_element *dev_rp, *local_rp;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 chan;
	int count = 0;
	dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);

	/*
	 * This is a quick check to avoid unnecessary event processing
	 * in case MHI is already in error state, but it's still possible
	 * to transition to error state while processing events
	 */
	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
		return -EIO;

	if (!is_valid_ring_ptr(ev_ring, ptr)) {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Event ring rp points outside of the event ring\n");
		return -EIO;
	}

	dev_rp = mhi_to_virtual(ev_ring, ptr);
	local_rp = ev_ring->rp;

	while (dev_rp != local_rp) {
		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);

		trace_mhi_ctrl_event(mhi_cntrl, local_rp);

		switch (type) {
		case MHI_PKT_TYPE_BW_REQ_EVENT:
		{
			struct mhi_link_info *link_info;

			link_info = &mhi_cntrl->mhi_link_info;
			write_lock_irq(&mhi_cntrl->pm_lock);
			link_info->target_link_speed =
				MHI_TRE_GET_EV_LINKSPEED(local_rp);
			link_info->target_link_width =
				MHI_TRE_GET_EV_LINKWIDTH(local_rp);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			dev_dbg(dev, "Received BW_REQ event\n");
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
			break;
		}
		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
		{
			enum mhi_state new_state;

			new_state = MHI_TRE_GET_EV_STATE(local_rp);

			dev_dbg(dev, "State change event to state: %s\n",
				mhi_state_str(new_state));

			switch (new_state) {
			case MHI_STATE_M0:
				mhi_pm_m0_transition(mhi_cntrl);
				break;
			case MHI_STATE_M1:
				mhi_pm_m1_transition(mhi_cntrl);
				break;
			case MHI_STATE_M3:
				mhi_pm_m3_transition(mhi_cntrl);
				break;
			case MHI_STATE_SYS_ERR:
			{
				enum mhi_pm_state pm_state;

				dev_dbg(dev, "System error detected\n");
				write_lock_irq(&mhi_cntrl->pm_lock);
				pm_state = mhi_tryset_pm_state(mhi_cntrl,
							       MHI_PM_SYS_ERR_DETECT);
				write_unlock_irq(&mhi_cntrl->pm_lock);
				if (pm_state == MHI_PM_SYS_ERR_DETECT)
					mhi_pm_sys_err_handler(mhi_cntrl);
				break;
			}
			default:
				dev_err(dev, "Invalid state: %s\n",
					mhi_state_str(new_state));
			}

			break;
		}
		case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
			mhi_process_cmd_completion(mhi_cntrl, local_rp);
			break;
		case MHI_PKT_TYPE_EE_EVENT:
		{
			enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
			enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);

			dev_dbg(dev, "Received EE event: %s\n",
				TO_MHI_EXEC_STR(event));
			switch (event) {
			case MHI_EE_SBL:
				st = DEV_ST_TRANSITION_SBL;
				break;
			case MHI_EE_WFW:
			case MHI_EE_AMSS:
				st = DEV_ST_TRANSITION_MISSION_MODE;
				break;
			case MHI_EE_FP:
				st = DEV_ST_TRANSITION_FP;
				break;
			case MHI_EE_RDDM:
				mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
				write_lock_irq(&mhi_cntrl->pm_lock);
				mhi_cntrl->ee = event;
				write_unlock_irq(&mhi_cntrl->pm_lock);
				wake_up_all(&mhi_cntrl->state_event);
				break;
			default:
				dev_err(dev,
					"Unhandled EE event: 0x%x\n", type);
			}
			if (st != DEV_ST_TRANSITION_MAX)
				mhi_queue_state_transition(mhi_cntrl, st);

			break;
		}
		case MHI_PKT_TYPE_TX_EVENT:
			chan = MHI_TRE_GET_EV_CHID(local_rp);

			WARN_ON(chan >= mhi_cntrl->max_chan);

			/*
			 * Only process the event ring elements whose channel
			 * ID is within the maximum supported range.
			 */
			if (chan < mhi_cntrl->max_chan) {
				mhi_chan = &mhi_cntrl->mhi_chan[chan];
				if (!mhi_chan->configured)
					break;
				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
			}
			break;
		default:
			dev_err(dev, "Unhandled event type: %d\n", type);
			break;
		}

		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
		local_rp = ev_ring->rp;

		ptr = le64_to_cpu(er_ctxt->rp);
		if (!is_valid_ring_ptr(ev_ring, ptr)) {
			dev_err(&mhi_cntrl->mhi_dev->dev,
				"Event ring rp points outside of the event ring\n");
			return -EIO;
		}

		dev_rp = mhi_to_virtual(ev_ring, ptr);
		count++;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);

	/* Ring EV DB only if there is any pending element to process */
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return count;
}
int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
				struct mhi_event *mhi_event,
				u32 event_quota)
{
	struct mhi_ring_element *dev_rp, *local_rp;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	int count = 0;
	u32 chan;
	struct mhi_chan *mhi_chan;
	dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);

	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
		return -EIO;

	if (!is_valid_ring_ptr(ev_ring, ptr)) {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Event ring rp points outside of the event ring\n");
		return -EIO;
	}

	dev_rp = mhi_to_virtual(ev_ring, ptr);
	local_rp = ev_ring->rp;

	while (dev_rp != local_rp && event_quota > 0) {
		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);

		trace_mhi_data_event(mhi_cntrl, local_rp);

		chan = MHI_TRE_GET_EV_CHID(local_rp);

		WARN_ON(chan >= mhi_cntrl->max_chan);

		/*
		 * Only process the event ring elements whose channel
		 * ID is within the maximum supported range.
		 */
		if (chan < mhi_cntrl->max_chan &&
		    mhi_cntrl->mhi_chan[chan].configured) {
			mhi_chan = &mhi_cntrl->mhi_chan[chan];

			if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
				event_quota--;
			} else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
				parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
				event_quota--;
			}
		}

		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
		local_rp = ev_ring->rp;

		ptr = le64_to_cpu(er_ctxt->rp);
		if (!is_valid_ring_ptr(ev_ring, ptr)) {
			dev_err(&mhi_cntrl->mhi_dev->dev,
				"Event ring rp points outside of the event ring\n");
			return -EIO;
		}

		dev_rp = mhi_to_virtual(ev_ring, ptr);
		count++;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);

	/* Ring EV DB only if there is any pending element to process */
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return count;
}
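
/*
 * The tasklet handlers below drain their rings with an effectively unlimited
 * event quota (U32_MAX); mhi_ev_task() additionally serializes on the event
 * ring lock before processing.
 */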
void mhi_ev_task(unsigned long data)
{
	struct mhi_event *mhi_event = (struct mhi_event *)data;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;

	/* process all pending events */
	spin_lock_bh(&mhi_event->lock);
	mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
	spin_unlock_bh(&mhi_event->lock);
}
void mhi_ctrl_ev_task(unsigned long data)
{
	struct mhi_event *mhi_event = (struct mhi_event *)data;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	enum mhi_pm_state pm_state = 0;
	int ret;

	/*
	 * We can check PM state w/o a lock here because there is no way
	 * PM state can change from reg access valid to no access while this
	 * thread is being executed.
	 */
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		/*
		 * We may have a pending event but not allowed to
		 * process it since we are probably in a suspended state,
		 * so trigger a resume.
		 */
		mhi_trigger_resume(mhi_cntrl);

		return;
	}

	/* Process ctrl events */
	ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);

	/*
	 * We received an IRQ but no events to process, maybe device went to
	 * SYS_ERR state? Check the state to confirm.
	 */
	if (!ret) {
		write_lock_irq(&mhi_cntrl->pm_lock);
		state = mhi_get_mhi_state(mhi_cntrl);
		if (state == MHI_STATE_SYS_ERR) {
			dev_dbg(dev, "System error detected\n");
			pm_state = mhi_tryset_pm_state(mhi_cntrl,
						       MHI_PM_SYS_ERR_DETECT);
		}
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (pm_state == MHI_PM_SYS_ERR_DETECT)
			mhi_pm_sys_err_handler(mhi_cntrl);
	}
}
static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
			     struct mhi_ring *ring)
{
	void *tmp = ring->wp + ring->el_size;

	if (tmp >= (ring->base + ring->len))
		tmp = ring->base;

	return (tmp == ring->rp);
}
static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
		     enum dma_data_direction dir, enum mhi_flags mflags)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
	unsigned long flags;
	int ret;

	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
		return -EIO;

	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
	if (unlikely(ret))
		return -EAGAIN;

	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
	if (unlikely(ret))
		return ret;

	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);

	/* Packet is queued, take a usage ref to exit M3 if necessary
	 * for host->device buffer, balanced put is done on buffer completion
	 * for device->host buffer, balanced put is after ringing the DB
	 */
	mhi_cntrl->runtime_get(mhi_cntrl);

	/* Assert dev_wake (to exit/prevent M1/M2)*/
	mhi_cntrl->wake_toggle(mhi_cntrl);

	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);

	if (dir == DMA_FROM_DEVICE)
		mhi_cntrl->runtime_put(mhi_cntrl);

	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);

	return ret;
}
int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
{
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct mhi_buf_info buf_info = { };

	buf_info.v_addr = skb->data;
	buf_info.cb_buf = skb;
	buf_info.len = len;

	if (unlikely(mhi_chan->pre_alloc))
		return -EINVAL;

	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);

int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
{
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct mhi_buf_info buf_info = { };

	buf_info.p_addr = mhi_buf->dma_addr;
	buf_info.cb_buf = mhi_buf;
	buf_info.pre_mapped = true;
	buf_info.len = len;

	if (unlikely(mhi_chan->pre_alloc))
		return -EINVAL;

	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_dma);
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		struct mhi_buf_info *info, enum mhi_flags flags)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_ring_element *mhi_tre;
	struct mhi_buf_info *buf_info;
	int eot, eob, chain, bei;
	int ret;

	/* Protect accesses for reading and incrementing WP */
	write_lock_bh(&mhi_chan->lock);

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	buf_info = buf_ring->wp;
	WARN_ON(buf_info->used);
	buf_info->pre_mapped = info->pre_mapped;
	if (info->pre_mapped)
		buf_info->p_addr = info->p_addr;
	else
		buf_info->v_addr = info->v_addr;
	buf_info->cb_buf = info->cb_buf;
	buf_info->wp = tre_ring->wp;
	buf_info->dir = mhi_chan->dir;
	buf_info->len = info->len;

	if (!info->pre_mapped) {
		ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
		if (ret) {
			write_unlock_bh(&mhi_chan->lock);
			return ret;
		}
	}

	eob = !!(flags & MHI_EOB);
	eot = !!(flags & MHI_EOT);
	chain = !!(flags & MHI_CHAIN);
	bei = !!(mhi_chan->intmod);

	mhi_tre = tre_ring->wp;
	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);

	trace_mhi_gen_tre(mhi_cntrl, mhi_chan, mhi_tre);

	/* increment WP */
	mhi_add_ring_element(mhi_cntrl, tre_ring);
	mhi_add_ring_element(mhi_cntrl, buf_ring);

	write_unlock_bh(&mhi_chan->lock);

	return 0;
}
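
/*
 * Illustrative note: the TRE flag bits above are derived directly from the
 * caller's mhi_flags and the channel configuration. For example, queueing a
 * buffer with MHI_EOT on a channel that has a non-zero intmod produces
 * eot = 1, eob = 0, chain = 0 and bei = 1 in MHI_TRE_DATA_DWORD1().
 */
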
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  void *buf, size_t len, enum mhi_flags mflags)
{
	struct mhi_buf_info buf_info = { };

	buf_info.v_addr = buf;
	buf_info.cb_buf = buf;
	buf_info.len = len;

	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);
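
/*
 * Hypothetical client usage sketch (names are illustrative, not part of this
 * file): a driver bound to an MHI channel pair would typically call
 *
 *	ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, pkt, pkt_len, MHI_EOT);
 *	if (ret == -EAGAIN)
 *		// back off, the transfer ring is currently full
 *
 * and then have its xfer_cb() invoked once the device completes the transfer.
 */
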
bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
		mhi_dev->ul_chan : mhi_dev->dl_chan;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

	return mhi_is_ring_full(mhi_cntrl, tre_ring);
}
EXPORT_SYMBOL_GPL(mhi_queue_is_full);
int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
		 struct mhi_chan *mhi_chan,
		 enum mhi_cmd_type cmd)
{
	struct mhi_ring_element *cmd_tre = NULL;
	struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
	struct mhi_ring *ring = &mhi_cmd->ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int chan = 0;

	if (mhi_chan)
		chan = mhi_chan->chan;

	spin_lock_bh(&mhi_cmd->lock);
	if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
		spin_unlock_bh(&mhi_cmd->lock);
		return -ENOMEM;
	}

	/* prepare the cmd tre */
	cmd_tre = ring->wp;
	switch (cmd) {
	case MHI_CMD_RESET_CHAN:
		cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
		cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
		cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
		break;
	case MHI_CMD_STOP_CHAN:
		cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR;
		cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0;
		cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan);
		break;
	case MHI_CMD_START_CHAN:
		cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
		cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
		cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
		break;
	default:
		dev_err(dev, "Command not supported\n");
		break;
	}

	/* queue to hardware */
	mhi_add_ring_element(mhi_cntrl, ring);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	spin_unlock_bh(&mhi_cmd->lock);

	return 0;
}
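
/*
 * Summary of the state checks enforced below (derived from the switch in
 * mhi_update_channel_state()): RESET is issued from STOP, ENABLED or
 * SUSPENDED; STOP only from ENABLED; START from STOP or DISABLED. Any other
 * combination is rejected before a command is ever sent to the device.
 */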
static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
				    struct mhi_chan *mhi_chan,
				    enum mhi_ch_state_type to_state)
{
	struct device *dev = &mhi_chan->mhi_dev->dev;
	enum mhi_cmd_type cmd = MHI_CMD_NOP;
	int ret;

	trace_mhi_channel_command_start(mhi_cntrl, mhi_chan, to_state, TPS("Updating"));
	switch (to_state) {
	case MHI_CH_STATE_TYPE_RESET:
		write_lock_irq(&mhi_chan->lock);
		if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
		    mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
		    mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
			write_unlock_irq(&mhi_chan->lock);
			return -EINVAL;
		}
		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		write_unlock_irq(&mhi_chan->lock);

		cmd = MHI_CMD_RESET_CHAN;
		break;
	case MHI_CH_STATE_TYPE_STOP:
		if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
			return -EINVAL;

		cmd = MHI_CMD_STOP_CHAN;
		break;
	case MHI_CH_STATE_TYPE_START:
		if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
		    mhi_chan->ch_state != MHI_CH_STATE_DISABLED)
			return -EINVAL;

		cmd = MHI_CMD_START_CHAN;
		break;
	default:
		dev_err(dev, "%d: Channel state update to %s not allowed\n",
			mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
		return -EINVAL;
	}

	/* bring host and device out of suspended states */
	ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
	if (ret)
		return ret;

	mhi_cntrl->runtime_get(mhi_cntrl);

	reinit_completion(&mhi_chan->completion);
	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
	if (ret) {
		dev_err(dev, "%d: Failed to send %s channel command\n",
			mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
		goto exit_channel_update;
	}

	ret = wait_for_completion_timeout(&mhi_chan->completion,
					  msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
		dev_err(dev,
			"%d: Failed to receive %s channel command completion\n",
			mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
		ret = -EIO;
		goto exit_channel_update;
	}

	ret = 0;

	if (to_state != MHI_CH_STATE_TYPE_RESET) {
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
				      MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP;
		write_unlock_irq(&mhi_chan->lock);
	}

	trace_mhi_channel_command_end(mhi_cntrl, mhi_chan, to_state, TPS("Updated"));
exit_channel_update:
	mhi_cntrl->runtime_put(mhi_cntrl);
	mhi_device_put(mhi_cntrl->mhi_dev);

	return ret;
}
static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
				  struct mhi_chan *mhi_chan)
{
	int ret;
	struct device *dev = &mhi_chan->mhi_dev->dev;

	mutex_lock(&mhi_chan->mutex);

	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
		dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n",
			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
		goto exit_unprepare_channel;
	}

	/* no more processing events for this channel */
	ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
				       MHI_CH_STATE_TYPE_RESET);
	if (ret)
		dev_err(dev, "%d: Failed to reset channel, still resetting\n",
			mhi_chan->chan);

exit_unprepare_channel:
	write_lock_irq(&mhi_chan->lock);
	mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
	write_unlock_irq(&mhi_chan->lock);

	if (!mhi_chan->offload_ch) {
		mhi_reset_chan(mhi_cntrl, mhi_chan);
		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
	}
	dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan);

	mutex_unlock(&mhi_chan->mutex);
}
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
			struct mhi_chan *mhi_chan, unsigned int flags)
{
	int ret = 0;
	struct device *dev = &mhi_chan->mhi_dev->dev;

	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
		dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n",
			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
		return -ENOTCONN;
	}

	mutex_lock(&mhi_chan->mutex);

	/* Check if the client manages the channel context for offload channels */
	if (!mhi_chan->offload_ch) {
		ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
		if (ret)
			goto error_init_chan;
	}

	ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
				       MHI_CH_STATE_TYPE_START);
	if (ret)
		goto error_pm_state;

	if (mhi_chan->dir == DMA_FROM_DEVICE)
		mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);

	/* Pre-allocate buffer for xfer ring */
	if (mhi_chan->pre_alloc) {
		int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
						       &mhi_chan->tre_ring);
		size_t len = mhi_cntrl->buffer_len;

		while (nr_el--) {
			void *buf;
			struct mhi_buf_info info = { };

			buf = kmalloc(len, GFP_KERNEL);
			if (!buf) {
				ret = -ENOMEM;
				goto error_pre_alloc;
			}

			/* Prepare transfer descriptors */
			info.v_addr = buf;
			info.cb_buf = buf;
			info.len = len;
			ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
			if (ret) {
				kfree(buf);
				goto error_pre_alloc;
			}
		}

		read_lock_bh(&mhi_cntrl->pm_lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
			read_lock_irq(&mhi_chan->lock);
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
			read_unlock_irq(&mhi_chan->lock);
		}
		read_unlock_bh(&mhi_cntrl->pm_lock);
	}

	mutex_unlock(&mhi_chan->mutex);

	return 0;

error_pm_state:
	if (!mhi_chan->offload_ch)
		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

error_init_chan:
	mutex_unlock(&mhi_chan->mutex);

	return ret;

error_pre_alloc:
	mutex_unlock(&mhi_chan->mutex);
	mhi_unprepare_channel(mhi_cntrl, mhi_chan);

	return ret;
}
static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
				  struct mhi_event *mhi_event,
				  struct mhi_event_ctxt *er_ctxt,
				  int chan)
{
	struct mhi_ring_element *dev_rp, *local_rp;
	struct mhi_ring *ev_ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	unsigned long flags;
	dma_addr_t ptr;

	dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);

	ev_ring = &mhi_event->ring;

	/* mark all stale events related to channel as STALE event */
	spin_lock_irqsave(&mhi_event->lock, flags);

	ptr = le64_to_cpu(er_ctxt->rp);
	if (!is_valid_ring_ptr(ev_ring, ptr)) {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Event ring rp points outside of the event ring\n");
		dev_rp = ev_ring->rp;
	} else {
		dev_rp = mhi_to_virtual(ev_ring, ptr);
	}

	local_rp = ev_ring->rp;
	while (dev_rp != local_rp) {
		if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
		    chan == MHI_TRE_GET_EV_CHID(local_rp))
			local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
					MHI_PKT_TYPE_STALE_EVENT);
		local_rp++;
		if (local_rp == (ev_ring->base + ev_ring->len))
			local_rp = ev_ring->base;
	}

	dev_dbg(dev, "Finished marking events as stale events\n");
	spin_unlock_irqrestore(&mhi_event->lock, flags);
}
static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
				struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_result result;

	/* Reset any pending buffers */
	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	result.transaction_status = -ENOTCONN;
	result.bytes_xferd = 0;
	while (tre_ring->rp != tre_ring->wp) {
		struct mhi_buf_info *buf_info = buf_ring->rp;

		if (mhi_chan->dir == DMA_TO_DEVICE) {
			atomic_dec(&mhi_cntrl->pending_pkts);
			/* Release the reference got from mhi_queue() */
			mhi_cntrl->runtime_put(mhi_cntrl);
		}

		if (!buf_info->pre_mapped)
			mhi_cntrl->unmap_single(mhi_cntrl, buf_info);

		mhi_del_ring_element(mhi_cntrl, buf_ring);
		mhi_del_ring_element(mhi_cntrl, tre_ring);

		if (mhi_chan->pre_alloc) {
			kfree(buf_info->cb_buf);
		} else {
			result.buf_addr = buf_info->cb_buf;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}
	}
}
void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
{
	struct mhi_event *mhi_event;
	struct mhi_event_ctxt *er_ctxt;
	int chan = mhi_chan->chan;

	/* Nothing to reset, client doesn't queue buffers */
	if (mhi_chan->offload_ch)
		return;

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
	er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];

	mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);

	mhi_reset_data_chan(mhi_cntrl, mhi_chan);

	read_unlock_bh(&mhi_cntrl->pm_lock);
}
static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
{
	int ret, dir;
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;

	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
		if (!mhi_chan)
			continue;

		ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
		if (ret)
			goto error_open_chan;
	}

	return 0;

error_open_chan:
	for (--dir; dir >= 0; dir--) {
		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
		if (!mhi_chan)
			continue;

		mhi_unprepare_channel(mhi_cntrl, mhi_chan);
	}

	return ret;
}

int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
{
	return __mhi_prepare_for_transfer(mhi_dev, 0);
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);

int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev)
{
	return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue);
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	int dir;

	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
		if (!mhi_chan)
			continue;

		mhi_unprepare_channel(mhi_cntrl, mhi_chan);
	}
}
EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
int mhi_get_channel_doorbell_offset(struct mhi_controller *mhi_cntrl, u32 *chdb_offset)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	void __iomem *base = mhi_cntrl->regs;
	int ret;

	ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, chdb_offset);
	if (ret) {
		dev_err(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_get_channel_doorbell_offset);
);