1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen anytime asynchronously. This function will
 * transition to a new state only if we're allowed to.
 *
 * Priority increases as we go down. For instance, from any state in L0, the
 * transition can be made to states in L1, L2 and L3. A notable exception to
 * this rule is state DISABLE. From DISABLE state we can only transition to
 * POR state. Also, while in L2 state, user cannot jump back to previous
 * L1 or L0 states.
 *
 * Valid transitions:
 * L0: DISABLE <--> POR
 *     POR -> M0 -> M2 --> M0
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
 * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
 *     SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> DISABLE
 */
static struct mhi_pm_transitions const dev_state_transitions[] = {
	/* L0 States */
	{
		MHI_PM_DISABLE,
		MHI_PM_POR
	},
	{
		MHI_PM_POR,
		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M0,
		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M2,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_ENTER,
		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3,
		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_EXIT,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_FW_DL_ERR,
		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L1 States */
	{
		MHI_PM_SYS_ERR_DETECT,
		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_PROCESS,
		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L2 States */
	{
		MHI_PM_SHUTDOWN_PROCESS,
		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L3 States */
	{
		MHI_PM_LD_ERR_FATAL_DETECT,
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
	},
};

enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
						   enum mhi_pm_state state)
{
	unsigned long cur_state = mhi_cntrl->pm_state;
	int index = find_last_bit(&cur_state, 32);

	if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
		return cur_state;

	if (unlikely(dev_state_transitions[index].from_state != cur_state))
		return cur_state;

	if (unlikely(!(dev_state_transitions[index].to_states & state)))
		return cur_state;

	mhi_cntrl->pm_state = state;
	return mhi_cntrl->pm_state;
}

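/*
 * Illustrative sketch (not part of the original file): callers in this driver
 * update the PM state under the pm_lock write lock and then check whether the
 * requested transition was accepted, e.g.:
 *
 *	write_lock_irq(&mhi_cntrl->pm_lock);
 *	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
 *	write_unlock_irq(&mhi_cntrl->pm_lock);
 *	if (cur_state != MHI_PM_M0)
 *		// transition was rejected, handle the error
 */
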
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
	if (state == MHI_STATE_RESET) {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
	} else {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_MHISTATE_MASK,
				    MHICTRL_MHISTATE_SHIFT, state);
	}
}

/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_put(mhi_cntrl, true);
}

/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
	void __iomem *base = mhi_cntrl->regs;
	struct mhi_event *mhi_event;
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 reset = 1, ready = 0;
	int ret, i;

	/* Wait for RESET to be cleared and READY bit to be set by the device */
	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
			   mhi_read_reg_field(mhi_cntrl, base, MHICTRL,
					      MHICTRL_RESET_MASK,
					      MHICTRL_RESET_SHIFT, &reset) ||
			   mhi_read_reg_field(mhi_cntrl, base, MHISTATUS,
					      MHISTATUS_READY_MASK,
					      MHISTATUS_READY_SHIFT, &ready) ||
			   (!reset && ready),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	/* Check if device entered error state */
	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device link is not accessible\n");
		return -EIO;
	}

	/* Timeout if device did not transition to ready state */
	if (reset || !ready) {
		dev_err(dev, "Device Ready timeout\n");
		return -ETIMEDOUT;
	}

	dev_dbg(dev, "Device in READY State\n");
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
	mhi_cntrl->dev_state = MHI_STATE_READY;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_POR) {
		dev_err(dev, "Error moving to state %s from %s\n",
			to_mhi_pm_state_str(MHI_PM_POR),
			to_mhi_pm_state_str(cur_state));
		return -EIO;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device registers not accessible\n");
		goto error_mmio;
	}

	/* Configure MMIO registers */
	ret = mhi_init_mmio(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Error configuring MMIO registers\n");
		goto error_mmio;
	}

	/* Add elements to all SW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if this is an offload or HW event */
		if (mhi_event->offload_ev || mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Update all cores */
		smp_wmb();

		/* Ring the event ring db */
		spin_lock_irq(&mhi_event->lock);
		mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	/* Set MHI to M0 state */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

error_mmio:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return -EIO;
}

int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M0;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_M0)) {
		dev_err(dev, "Unable to transition to M0 state\n");
		return -EIO;
	}

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);

	/* Ring all event rings and CMD ring only if we're in mission mode */
	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
		struct mhi_cmd *mhi_cmd =
			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
			if (mhi_event->offload_ev)
				continue;

			spin_lock_irq(&mhi_event->lock);
			mhi_ring_er_db(mhi_event);
			spin_unlock_irq(&mhi_event->lock);
		}

		/* Only ring primary cmd ring if ring is not empty */
		spin_lock_irq(&mhi_cmd->lock);
		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
		spin_unlock_irq(&mhi_cmd->lock);
	}

	/* Ring channel DB registers */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

		if (mhi_chan->db_cfg.reset_req) {
			write_lock_irq(&mhi_chan->lock);
			mhi_chan->db_cfg.db_mode = true;
			write_unlock_irq(&mhi_chan->lock);
		}

		read_lock_irq(&mhi_chan->lock);

		/* Only ring DB if ring is not empty */
		if (tre_ring->base && tre_ring->wp != tre_ring->rp)
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irq(&mhi_chan->lock);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/*
 * After receiving the MHI state change event from the device indicating the
 * transition to M1 state, the host can transition the device to M2 state
 * for keeping it in low power state.
 */
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
	if (state == MHI_PM_M2) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
		mhi_cntrl->dev_state = MHI_STATE_M2;

		write_unlock_irq(&mhi_cntrl->pm_lock);
		wake_up_all(&mhi_cntrl->state_event);

		/* If there are any pending resources, exit M2 immediately */
		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
			     atomic_read(&mhi_cntrl->dev_wake))) {
			dev_dbg(dev,
				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
				atomic_read(&mhi_cntrl->pending_pkts),
				atomic_read(&mhi_cntrl->dev_wake));
			read_lock_bh(&mhi_cntrl->pm_lock);
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, true);
			read_unlock_bh(&mhi_cntrl->pm_lock);
		} else {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
		}
	} else {
		write_unlock_irq(&mhi_cntrl->pm_lock);
	}
}

/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M3;
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (state != MHI_PM_M3) {
		dev_err(dev, "Unable to transition to M3 state\n");
		return -EIO;
	}

	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	dev_dbg(dev, "Processing Mission Mode transition\n");

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);

	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
		write_unlock_irq(&mhi_cntrl->pm_lock);
		wake_up_all(&mhi_cntrl->state_event);
		return -EIO;
	}
	write_unlock_irq(&mhi_cntrl->pm_lock);

	wake_up_all(&mhi_cntrl->state_event);

	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

	/* Force MHI to be in M0 state before continuing */
	ret = __mhi_device_get_sync(mhi_cntrl);
	if (ret)
		return ret;

	read_lock_bh(&mhi_cntrl->pm_lock);

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		ret = -EIO;
		goto error_mission_mode;
	}

	/* Add elements to all HW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev || !mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Update to all cores */
		smp_wmb();

		spin_lock_irq(&mhi_event->lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
			mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	/*
	 * The MHI devices are only created when the client device switches its
	 * Execution Environment (EE) to either SBL or AMSS states
	 */
	mhi_create_devices(mhi_cntrl);

	read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}

/* Handle shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Processing disable transition with PM state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state));

	mutex_lock(&mhi_cntrl->pm_mutex);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    MHICTRL_RESET_SHIFT,
							    &in_reset) ||
					 !in_reset, timeout);
		if (!ret || in_reset)
			dev_err(dev, "Device failed to exit MHI Reset state\n");

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
	     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Move to disable state */
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_DISABLE))
		dev_err(dev, "Error moving from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_DISABLE));

	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Handle system error transitions */
static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state, prev_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));

	/* We must notify MHI control driver so it can clean up first */
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	prev_state = mhi_cntrl->pm_state;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
		dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
		goto exit_sys_error_transition;
	}

	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	/* Wake up threads waiting for state transition */
	wake_up_all(&mhi_cntrl->state_event);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (MHI_REG_ACCESS_VALID(prev_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    MHICTRL_RESET_SHIFT,
							    &in_reset) ||
					 !in_reset, timeout);
		if (!ret || in_reset) {
			dev_err(dev, "Device failed to exit MHI Reset state\n");
			goto exit_sys_error_transition;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
	     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	mhi_ready_state_transition(mhi_cntrl);

exit_sys_error_transition:
	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Queue a new work item and schedule work */
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state)
{
	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
	unsigned long flags;

	if (!item)
		return -ENOMEM;

	item->state = state;
	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
	list_add_tail(&item->node, &mhi_cntrl->transition_list);
	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);

	queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);

	return 0;
}

/* SYS_ERR worker */
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* skip if controller supports RDDM */
	if (mhi_cntrl->rddm_image) {
		dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
		return;
	}

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}

/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
	struct state_transition *itr, *tmp;
	LIST_HEAD(head);
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							st_worker);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	spin_lock_irq(&mhi_cntrl->transition_lock);
	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
	spin_unlock_irq(&mhi_cntrl->transition_lock);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling state transition: %s\n",
			TO_DEV_STATE_TRANS_STR(itr->state));

		switch (itr->state) {
		case DEV_ST_TRANSITION_PBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			if (MHI_IN_PBL(mhi_cntrl->ee))
				mhi_fw_load_handler(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_SBL;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			/*
			 * The MHI devices are only created when the client
			 * device switches its Execution Environment (EE) to
			 * either SBL or AMSS states
			 */
			mhi_create_devices(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_MISSION_MODE:
			mhi_pm_mission_mode_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_READY:
			mhi_ready_state_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SYS_ERR:
			mhi_pm_sys_error_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_DISABLE:
			mhi_pm_disable_transition(mhi_cntrl);
			break;
		default:
			break;
		}
		kfree(itr);
	}
}

int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state new_state;
	int ret;

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return -EINVAL;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Return busy if there are any pending resources */
	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts))
		return -EBUSY;

	/* Take MHI out of M2 state */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M1 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Could not enter M0/M1 state");
		return -EIO;
	}

	write_lock_irq(&mhi_cntrl->pm_lock);

	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		return -EBUSY;
	}

	dev_info(dev, "Allowing M3 transition\n");
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
	if (new_state != MHI_PM_M3_ENTER) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_err(dev,
			"Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M3 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	dev_info(dev, "Wait for M3 completion\n");

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M3 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M3 state, MHI state: %s, PM state: %s\n",
			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Notify clients about entering LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
		mutex_unlock(&itr->mutex);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);

int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state cur_state;
	int ret;

	dev_info(dev, "Entered with PM state: %s, MHI state: %s\n",
		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
		 TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return 0;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Notify clients about exiting LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
		mutex_unlock(&itr->mutex);
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
	if (cur_state != MHI_PM_M3_EXIT) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_info(dev,
			 "Error setting to PM state: %s from: %s\n",
			 to_mhi_pm_state_str(MHI_PM_M3_EXIT),
			 to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M0 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M0 state, MHI state: %s, PM state: %s\n",
			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_resume);

int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}
	mhi_cntrl->wake_get(mhi_cntrl, true);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->pm_state == MHI_PM_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	return 0;
}

/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
	unsigned long flags;

	/*
	 * If force flag is set, then increment the wake count value and
	 * ring wake db
	 */
	if (unlikely(force)) {
		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		atomic_inc(&mhi_cntrl->dev_wake);
		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	} else {
		/*
		 * If resources are already requested, then just increment
		 * the wake count value and return
		 */
		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
			return;

		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	}
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
				  bool override)
{
	unsigned long flags;

	/*
	 * Only continue if there is a single resource, else just decrement
	 * and return
	 */
	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
		return;

	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
	    mhi_cntrl->wake_set) {
		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
		mhi_cntrl->wake_set = false;
	}
	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}

int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
	enum mhi_state state;
	enum mhi_ee_type current_ee;
	enum dev_st_transition next_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 val;
	int ret;

	dev_info(dev, "Requested to power ON\n");

	/* Supply default wake routines if not provided by controller driver */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
	    !mhi_cntrl->wake_toggle) {
		mhi_cntrl->wake_get = mhi_assert_dev_wake;
		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	mhi_cntrl->pm_state = MHI_PM_DISABLE;

	if (!mhi_cntrl->pre_init) {
		/* Setup device context */
		ret = mhi_init_dev_ctxt(mhi_cntrl);
		if (ret)
			goto error_dev_ctxt;
	}

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto error_setup_irq;

	/* Setup BHI offset & INTVEC */
	write_lock_irq(&mhi_cntrl->pm_lock);
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
	if (ret) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		goto error_bhi_offset;
	}

	mhi_cntrl->bhi = mhi_cntrl->regs + val;

	/* Setup BHIE offset */
	if (mhi_cntrl->fbc_download) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
		if (ret) {
			write_unlock_irq(&mhi_cntrl->pm_lock);
			dev_err(dev, "Error reading BHIE offset\n");
			goto error_bhi_offset;
		}

		mhi_cntrl->bhie = mhi_cntrl->regs + val;
	}

	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	mhi_cntrl->pm_state = MHI_PM_POR;
	mhi_cntrl->ee = MHI_EE_MAX;
	current_ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Confirm that the device is in valid exec env */
	if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
		dev_err(dev, "Not a valid EE for power on\n");
		ret = -EIO;
		goto error_bhi_offset;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	if (state == MHI_STATE_SYS_ERR) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
		ret = wait_event_timeout(mhi_cntrl->state_event,
				MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
					mhi_read_reg_field(mhi_cntrl,
							   mhi_cntrl->regs,
							   MHICTRL,
							   MHICTRL_RESET_MASK,
							   MHICTRL_RESET_SHIFT,
							   &val) ||
					!val,
				msecs_to_jiffies(mhi_cntrl->timeout_ms));
		if (!ret) {
			ret = -EIO;
			dev_info(dev, "Failed to reset MHI due to syserr state\n");
			goto error_bhi_offset;
		}

		/*
		 * device clears INTVEC as part of RESET processing,
		 * re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	/* Transition to next state */
	next_state = MHI_IN_PBL(current_ee) ?
		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

	mhi_queue_state_transition(mhi_cntrl, next_state);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	dev_info(dev, "Power on setup success\n");

	return 0;

error_bhi_offset:
	mhi_deinit_free_irq(mhi_cntrl);

error_setup_irq:
	if (!mhi_cntrl->pre_init)
		mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);

void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
	enum mhi_pm_state cur_state, transition_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* If it's not a graceful shutdown, force MHI to linkdown state */
	transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
			   MHI_PM_LD_ERR_FATAL_DETECT;

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
	if (cur_state != transition_state) {
		dev_err(dev, "Failed to move to state: %s from: %s\n",
			to_mhi_pm_state_str(transition_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		/* Force link down or error fatal detected state */
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
	}

	/* mark device inactive to avoid any further host processing */
	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	wake_up_all(&mhi_cntrl->state_event);

	write_unlock_irq(&mhi_cntrl->pm_lock);
	mutex_unlock(&mhi_cntrl->pm_mutex);

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);

	/* Wait for shutdown to complete */
	flush_work(&mhi_cntrl->st_worker);

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	if (!mhi_cntrl->pre_init) {
		/* Free all allocated resources */
		if (mhi_cntrl->fbc_image) {
			mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
			mhi_cntrl->fbc_image = NULL;
		}
		mhi_deinit_dev_ctxt(mhi_cntrl);
	}
}
EXPORT_SYMBOL_GPL(mhi_power_down);

int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
	int ret = mhi_async_power_up(mhi_cntrl);

	if (ret)
		return ret;

	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
	if (ret)
		mhi_power_down(mhi_cntrl, false);

	return ret;
}
EXPORT_SYMBOL(mhi_sync_power_up);

int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Check if device is already in RDDM */
	if (mhi_cntrl->ee == MHI_EE_RDDM)
		return 0;

	dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

	/* Wait for RDDM event */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->ee == MHI_EE_RDDM,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	ret = ret ? 0 : -EIO;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);

void mhi_device_get(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake++;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_get(mhi_cntrl, true);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_get);

int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int ret;

	ret = __mhi_device_get_sync(mhi_cntrl);
	if (!ret)
		mhi_dev->dev_wake++;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);

void mhi_device_put(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake--;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);