/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 */

#ifndef _MHI_INT_H
#define _MHI_INT_H

#include "../common.h"

extern const struct bus_type mhi_bus_type;

/* Host request register */
#define MHI_SOC_RESET_REQ_OFFSET 0xb0
#define MHI_SOC_RESET_REQ BIT(0)

struct mhi_ctxt {
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	dma_addr_t er_ctxt_addr;
	dma_addr_t chan_ctxt_addr;
	dma_addr_t cmd_ctxt_addr;
};

struct bhi_vec_entry {
	u64 dma_addr;
	u64 size;
};

enum mhi_fw_load_type {
	MHI_FW_LOAD_BHI, /* BHI only in PBL */
	MHI_FW_LOAD_BHIE, /* BHIe only in PBL */
	MHI_FW_LOAD_FBC, /* BHI in PBL followed by BHIe in SBL */
	MHI_FW_LOAD_MAX,
};

enum mhi_ch_state_type {
	MHI_CH_STATE_TYPE_RESET,
	MHI_CH_STATE_TYPE_STOP,
	MHI_CH_STATE_TYPE_START,
	MHI_CH_STATE_TYPE_MAX,
};

#define MHI_CH_STATE_TYPE_LIST \
	ch_state_type(RESET, "RESET") \
	ch_state_type(STOP, "STOP") \
	ch_state_type_end(START, "START")

extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX];

#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \
				     "INVALID_STATE" : \
				     mhi_ch_state_type_str[(state)])

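/*
 * Usage sketch (illustrative, not part of the original header): the lookup is
 * bounds checked, so callers can hand it an untrusted value directly, e.g.:
 *
 *	dev_dbg(dev, "channel state: %s\n", TO_CH_STATE_TYPE_STR(state));
 *
 * An out-of-range value yields "INVALID_STATE" instead of indexing past the
 * mhi_ch_state_type_str table.
 */
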
#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \
				    mode != MHI_DB_BRST_ENABLE)

#define MHI_EE_LIST \
	mhi_ee(PBL, "PRIMARY BOOTLOADER") \
	mhi_ee(SBL, "SECONDARY BOOTLOADER") \
	mhi_ee(AMSS, "MISSION MODE") \
	mhi_ee(RDDM, "RAMDUMP DOWNLOAD MODE") \
	mhi_ee(WFW, "WLAN FIRMWARE") \
	mhi_ee(PTHRU, "PASS THROUGH") \
	mhi_ee(EDL, "EMERGENCY DOWNLOAD") \
	mhi_ee(FP, "FLASH PROGRAMMER") \
	mhi_ee(DISABLE_TRANSITION, "DISABLE") \
	mhi_ee_end(NOT_SUPPORTED, "NOT SUPPORTED")

extern const char * const mhi_ee_str[MHI_EE_MAX];

#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
			     "INVALID_EE" : mhi_ee_str[ee])

#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
			ee == MHI_EE_EDL)
#define MHI_POWER_UP_CAPABLE(ee) (MHI_IN_PBL(ee) || ee == MHI_EE_AMSS)
#define MHI_FW_LOAD_CAPABLE(ee) (ee == MHI_EE_PBL || ee == MHI_EE_EDL)
#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \
				 ee == MHI_EE_FP)

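/*
 * Illustrative sketch (an assumption, not code from this header): boot and
 * firmware-load paths gate on the current execution environment with these
 * helpers, roughly along the lines of:
 *
 *	enum mhi_ee_type ee = mhi_get_exec_env(mhi_cntrl);
 *
 *	if (!MHI_POWER_UP_CAPABLE(ee))
 *		return -EIO;
 *	if (MHI_FW_LOAD_CAPABLE(ee))
 *		mhi_fw_load_handler(mhi_cntrl);
 *
 * The real call sites live in the host core .c files, not in this header.
 */
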
enum dev_st_transition {
	DEV_ST_TRANSITION_PBL,
	DEV_ST_TRANSITION_READY,
	DEV_ST_TRANSITION_SBL,
	DEV_ST_TRANSITION_MISSION_MODE,
	DEV_ST_TRANSITION_FP,
	DEV_ST_TRANSITION_SYS_ERR,
	DEV_ST_TRANSITION_DISABLE,
	DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE,
	DEV_ST_TRANSITION_MAX,
};

#define DEV_ST_TRANSITION_LIST \
	dev_st_trans(PBL, "PBL") \
	dev_st_trans(READY, "READY") \
	dev_st_trans(SBL, "SBL") \
	dev_st_trans(MISSION_MODE, "MISSION MODE") \
	dev_st_trans(FP, "FLASH PROGRAMMER") \
	dev_st_trans(SYS_ERR, "SYS ERROR") \
	dev_st_trans(DISABLE, "DISABLE") \
	dev_st_trans_end(DISABLE_DESTROY_DEVICE, "DISABLE (DESTROY DEVICE)")

extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX];

#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \
				       "INVALID_STATE" : dev_state_tran_str[state])

/* internal power states */
enum mhi_pm_state {
	MHI_PM_STATE_DISABLE,
	MHI_PM_STATE_POR,
	MHI_PM_STATE_M0,
	MHI_PM_STATE_M2,
	MHI_PM_STATE_M3_ENTER,
	MHI_PM_STATE_M3,
	MHI_PM_STATE_M3_EXIT,
	MHI_PM_STATE_FW_DL_ERR,
	MHI_PM_STATE_SYS_ERR_DETECT,
	MHI_PM_STATE_SYS_ERR_PROCESS,
	MHI_PM_STATE_SYS_ERR_FAIL,
	MHI_PM_STATE_SHUTDOWN_PROCESS,
	MHI_PM_STATE_LD_ERR_FATAL_DETECT,
	MHI_PM_STATE_MAX
};

#define MHI_PM_STATE_LIST \
	mhi_pm_state(DISABLE, "DISABLE") \
	mhi_pm_state(POR, "POWER ON RESET") \
	mhi_pm_state(M0, "M0") \
	mhi_pm_state(M2, "M2") \
	mhi_pm_state(M3_ENTER, "M?->M3") \
	mhi_pm_state(M3, "M3") \
	mhi_pm_state(M3_EXIT, "M3->M0") \
	mhi_pm_state(FW_DL_ERR, "Firmware Download Error") \
	mhi_pm_state(SYS_ERR_DETECT, "SYS ERROR Detect") \
	mhi_pm_state(SYS_ERR_PROCESS, "SYS ERROR Process") \
	mhi_pm_state(SYS_ERR_FAIL, "SYS ERROR Failure") \
	mhi_pm_state(SHUTDOWN_PROCESS, "SHUTDOWN Process") \
	mhi_pm_state_end(LD_ERR_FATAL_DETECT, "Linkdown or Error Fatal Detect")

#define MHI_PM_DISABLE BIT(0)
#define MHI_PM_POR BIT(1)
#define MHI_PM_M0 BIT(2)
#define MHI_PM_M2 BIT(3)
#define MHI_PM_M3_ENTER BIT(4)
#define MHI_PM_M3 BIT(5)
#define MHI_PM_M3_EXIT BIT(6)
/* firmware download failure state */
#define MHI_PM_FW_DL_ERR BIT(7)
#define MHI_PM_SYS_ERR_DETECT BIT(8)
#define MHI_PM_SYS_ERR_PROCESS BIT(9)
#define MHI_PM_SYS_ERR_FAIL BIT(10)
#define MHI_PM_SHUTDOWN_PROCESS BIT(11)
/* link not accessible */
#define MHI_PM_LD_ERR_FATAL_DETECT BIT(12)

#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
					MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
					MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
					MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS | \
					MHI_PM_FW_DL_ERR)))
#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & mhi_cntrl->db_access)
#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
					   MHI_PM_M2 | MHI_PM_M3_EXIT))
#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2)
#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state)
#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \
					    MHI_PM_IN_ERROR_STATE(pm_state))
#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \
					   (MHI_PM_M3_ENTER | MHI_PM_M3))

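/*
 * Illustrative sketch (an assumption, not code from this header): because
 * each internal PM state is a single bit, "may I touch MMIO right now" is a
 * simple mask test done under the controller's pm_lock, roughly:
 *
 *	read_lock_bh(&mhi_cntrl->pm_lock);
 *	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
 *		mhi_write_reg(mhi_cntrl, base, offset, val);
 *	read_unlock_bh(&mhi_cntrl->pm_lock);
 *
 * The BIT() ordering is also meaningful: everything at or above
 * MHI_PM_FW_DL_ERR counts as an error state for MHI_PM_IN_ERROR_STATE().
 */
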
#define NR_OF_CMD_RINGS 1
#define CMD_EL_PER_RING 128
#define PRIMARY_CMD_RING 0
#define MHI_DEV_WAKE_DB 127
#define MHI_MAX_MTU 0xffff
#define MHI_RANDOM_U32_NONZERO(bmsk) (get_random_u32_inclusive(1, bmsk))

enum mhi_er_type {
	MHI_ER_TYPE_INVALID = 0x0,
	MHI_ER_TYPE_VALID = 0x1,
};

struct db_cfg {
	enum mhi_db_brst_mode brstmode;
	void (*process_db)(struct mhi_controller *mhi_cntrl,
			   struct db_cfg *db_cfg, void __iomem *io_addr,
			   dma_addr_t db_val);
};

struct mhi_pm_transitions {
	enum mhi_pm_state from_state;
	u32 to_states;
};

struct state_transition {
	struct list_head node;
	enum dev_st_transition state;
};

struct mhi_ring {
	dma_addr_t dma_handle;
	dma_addr_t iommu_base;
	__le64 *ctxt_wp; /* point to ctxt wp */
	void __iomem *db_addr;
};

struct mhi_cmd {
	struct mhi_ring ring;
	spinlock_t lock;
};

struct mhi_buf_info {
	enum dma_data_direction dir;
	bool used; /* Indicates whether the buffer is used or not */
	bool pre_mapped; /* Already pre-mapped by client */
};

struct mhi_event {
	struct mhi_controller *mhi_cntrl;
	struct mhi_chan *mhi_chan; /* dedicated to channel */
	int chan; /* this event ring is dedicated to a channel (optional) */
	enum mhi_er_data_type data_type;
	struct mhi_ring ring;
	struct db_cfg db_cfg;
	struct tasklet_struct task;
	int (*process_event)(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event,
			     u32 event_quota);
	bool offload_ev; /* managed by a device driver */
};

struct mhi_chan {
	/*
	 * Important: When consuming, increment tre_ring first and when
	 * releasing, decrement buf_ring first. If tre_ring has space, buf_ring
	 * is guaranteed to have space so we do not need to check both rings.
	 */
	struct mhi_ring buf_ring;
	struct mhi_ring tre_ring;
	enum mhi_ch_type type;
	enum dma_data_direction dir;
	struct db_cfg db_cfg;
	enum mhi_ch_ee_mask ee_mask;
	enum mhi_ch_state ch_state;
	struct mhi_device *mhi_dev;
	void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result);
	struct completion completion;
	struct list_head node;
};

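/*
 * Illustrative sketch of the ordering rule documented in struct mhi_chan
 * (an assumption, simplified from the ring helpers in the host core):
 *
 *	// consume: claim the TRE slot first, then the buffer slot
 *	mhi_add_ring_element(mhi_cntrl, &mhi_chan->tre_ring);
 *	mhi_add_ring_element(mhi_cntrl, &mhi_chan->buf_ring);
 *
 *	// release: free the buffer slot first, then the TRE slot
 *	mhi_del_ring_element(mhi_cntrl, &mhi_chan->buf_ring);
 *	mhi_del_ring_element(mhi_cntrl, &mhi_chan->tre_ring);
 *
 * With both rings sized identically, checking tre_ring for free space is
 * sufficient; buf_ring can never be fuller than tre_ring under this order.
 */
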
/* Default MHI timeout */
#define MHI_TIMEOUT_MS (1000)

/* debugfs related functions */
#ifdef CONFIG_MHI_BUS_DEBUG
void mhi_create_debugfs(struct mhi_controller *mhi_cntrl);
void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl);
void mhi_debugfs_init(void);
void mhi_debugfs_exit(void);
#else
static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
{
}

static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl)
{
}

static inline void mhi_debugfs_init(void)
{
}

static inline void mhi_debugfs_exit(void)
{
}
#endif

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);
int mhi_destroy_device(struct device *dev, void *data);
void mhi_create_devices(struct mhi_controller *mhi_cntrl);

int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info **image_info, size_t alloc_size);
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info *image_info);

/* Power management APIs */
enum mhi_pm_state __must_check mhi_tryset_pm_state(
					struct mhi_controller *mhi_cntrl,
					enum mhi_pm_state state);
const char *to_mhi_pm_state_str(u32 state);
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state);
void mhi_pm_st_worker(struct work_struct *work);
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		 enum mhi_cmd_type cmd);
int mhi_download_amss_image(struct mhi_controller *mhi_cntrl);

static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
{
	return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
		mhi_cntrl->dev_state <= MHI_STATE_M3_FAST);
}

static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl)
{
	pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
	mhi_cntrl->runtime_get(mhi_cntrl);
	mhi_cntrl->runtime_put(mhi_cntrl);
}

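/*
 * Illustrative usage (an assumption, mirroring how the data-queueing paths
 * use this helper): callers that need the device awake while it may be
 * suspended nudge runtime PM before ringing doorbells, e.g.:
 *
 *	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
 *		mhi_trigger_resume(mhi_cntrl);
 *
 * The runtime_get()/runtime_put() pair only bumps the usage count long enough
 * for the controller driver to schedule a resume.
 */
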
/* Register access methods */
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg,
		     void __iomem *db_addr, dma_addr_t db_val);
void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
			     struct db_cfg *db_mode, void __iomem *db_addr,
			     dma_addr_t db_val);
int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *base, u32 offset, u32 *out);
int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset, u32 mask,
				    u32 *out);
int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset, u32 mask,
				    u32 val, u32 delayus, u32 timeout_ms);
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
		   u32 offset, u32 val);
int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
				     void __iomem *base, u32 offset, u32 mask,
				     u32 val);
void mhi_ring_er_db(struct mhi_event *mhi_event);
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
		  dma_addr_t db_val);
void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd);
void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
		      struct mhi_chan *mhi_chan);

/* Initialization methods */
int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
		     struct image_info *img_info);
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);

/* Automatically allocate and queue inbound buffers */
#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
			struct mhi_chan *mhi_chan, unsigned int flags);

int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan);
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan);
void mhi_reset_chan(struct mhi_controller *mhi_cntrl,
		    struct mhi_chan *mhi_chan);

/* Event processing methods */
void mhi_ctrl_ev_task(unsigned long data);
void mhi_ev_task(unsigned long data);
int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
				struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event, u32 event_quota);

irqreturn_t mhi_irq_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_handler(int irq_number, void *dev);

int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		struct mhi_buf_info *info, enum mhi_flags flags);
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
			 struct mhi_buf_info *buf_info);
int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf_info);
void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
			    struct mhi_buf_info *buf_info);
void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf_info);

#endif /* _MHI_INT_H */