/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#ifndef _MHI_INT_H
#define _MHI_INT_H

#include <linux/mhi.h>

extern struct bus_type mhi_bus_type;

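/*
 * MMIO register offsets (relative to the MHI register base) and the
 * mask/shift pairs used with mhi_read_reg_field()/mhi_write_reg_field().
 */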
#define MHIREGLEN (0x0)
#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF)
#define MHIREGLEN_MHIREGLEN_SHIFT (0)

#define MHIVER (0x8)
#define MHIVER_MHIVER_MASK (0xFFFFFFFF)
#define MHIVER_MHIVER_SHIFT (0)

#define MHICFG (0x10)
#define MHICFG_NHWER_MASK (0xFF000000)
#define MHICFG_NHWER_SHIFT (24)
#define MHICFG_NER_MASK (0xFF0000)
#define MHICFG_NER_SHIFT (16)
#define MHICFG_NHWCH_MASK (0xFF00)
#define MHICFG_NHWCH_SHIFT (8)
#define MHICFG_NCH_MASK (0xFF)
#define MHICFG_NCH_SHIFT (0)

#define CHDBOFF (0x18)
#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF)
#define CHDBOFF_CHDBOFF_SHIFT (0)

#define ERDBOFF (0x20)
#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF)
#define ERDBOFF_ERDBOFF_SHIFT (0)

#define BHIOFF (0x28)
#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF)
#define BHIOFF_BHIOFF_SHIFT (0)

#define BHIEOFF (0x2C)
#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF)
#define BHIEOFF_BHIEOFF_SHIFT (0)

#define DEBUGOFF (0x30)
#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF)
#define DEBUGOFF_DEBUGOFF_SHIFT (0)

#define MHICTRL (0x38)
#define MHICTRL_MHISTATE_MASK (0x0000FF00)
#define MHICTRL_MHISTATE_SHIFT (8)
#define MHICTRL_RESET_MASK (0x2)
#define MHICTRL_RESET_SHIFT (1)

#define MHISTATUS (0x48)
#define MHISTATUS_MHISTATE_MASK (0x0000FF00)
#define MHISTATUS_MHISTATE_SHIFT (8)
#define MHISTATUS_SYSERR_MASK (0x4)
#define MHISTATUS_SYSERR_SHIFT (2)
#define MHISTATUS_READY_MASK (0x1)
#define MHISTATUS_READY_SHIFT (0)

#define CCABAP_LOWER (0x58)
#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF)
#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0)

#define CCABAP_HIGHER (0x5C)
#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF)
#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0)

#define ECABAP_LOWER (0x60)
#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF)
#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0)

#define ECABAP_HIGHER (0x64)
#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF)
#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0)

#define CRCBAP_LOWER (0x68)
#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF)
#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0)

#define CRCBAP_HIGHER (0x6C)
#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF)
#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0)

#define CRDB_LOWER (0x70)
#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF)
#define CRDB_LOWER_CRDB_LOWER_SHIFT (0)

#define CRDB_HIGHER (0x74)
#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF)
#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0)

#define MHICTRLBASE_LOWER (0x80)
#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF)
#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0)

#define MHICTRLBASE_HIGHER (0x84)
#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF)
#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0)

#define MHICTRLLIMIT_LOWER (0x88)
#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF)
#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0)

#define MHICTRLLIMIT_HIGHER (0x8C)
#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF)
#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0)

#define MHIDATABASE_LOWER (0x98)
#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF)
#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0)

#define MHIDATABASE_HIGHER (0x9C)
#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF)
#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0)

#define MHIDATALIMIT_LOWER (0xA0)
#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF)
#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0)

#define MHIDATALIMIT_HIGHER (0xA4)
#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF)
#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0)

/* Host request register */
#define MHI_SOC_RESET_REQ_OFFSET (0xB0)
#define MHI_SOC_RESET_REQ BIT(0)

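/*
 * BHI (Boot Host Interface): the host programs the firmware image
 * address/size below and rings BHI_IMGTXDB so the device pulls the image,
 * then polls BHI_STATUS for the result.
 */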
/* MHI BHI offsets */
#define BHI_BHIVERSION_MINOR (0x00)
#define BHI_BHIVERSION_MAJOR (0x04)
#define BHI_IMGADDR_LOW (0x08)
#define BHI_IMGADDR_HIGH (0x0C)
#define BHI_IMGSIZE (0x10)
#define BHI_RSVD1 (0x14)
#define BHI_IMGTXDB (0x18)
#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF)
#define BHI_TXDB_SEQNUM_SHFT (0)
#define BHI_RSVD2 (0x1C)
#define BHI_INTVEC (0x20)
#define BHI_RSVD3 (0x24)
#define BHI_EXECENV (0x28)
#define BHI_STATUS (0x2C)
#define BHI_ERRCODE (0x30)
#define BHI_ERRDBG1 (0x34)
#define BHI_ERRDBG2 (0x38)
#define BHI_ERRDBG3 (0x3C)
#define BHI_SERIALNU (0x40)
#define BHI_SBLANTIROLLVER (0x44)
#define BHI_NUMSEG (0x48)
#define BHI_MSMHWID(n) (0x4C + (0x4 * (n)))
#define BHI_OEMPKHASH(n) (0x64 + (0x4 * (n)))
#define BHI_RSVD5 (0xC4)
#define BHI_STATUS_MASK (0xC0000000)
#define BHI_STATUS_SHIFT (30)
#define BHI_STATUS_ERROR (3)
#define BHI_STATUS_SUCCESS (2)
#define BHI_STATUS_RESET (0)

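/*
 * BHIE (BHI Extended) moves larger images through vector tables built from
 * struct bhi_vec_entry, with separate TX (firmware download) and RX (RDDM
 * collection) register sets.
 */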
/* MHI BHIE offsets */
#define BHIE_MSMSOCID_OFFS (0x0000)
#define BHIE_TXVECADDR_LOW_OFFS (0x002C)
#define BHIE_TXVECADDR_HIGH_OFFS (0x0030)
#define BHIE_TXVECSIZE_OFFS (0x0034)
#define BHIE_TXVECDB_OFFS (0x003C)
#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
#define BHIE_TXVECDB_SEQNUM_SHFT (0)
#define BHIE_TXVECSTATUS_OFFS (0x0044)
#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0)
#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000)
#define BHIE_TXVECSTATUS_STATUS_SHFT (30)
#define BHIE_TXVECSTATUS_STATUS_RESET (0x00)
#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02)
#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03)
#define BHIE_RXVECADDR_LOW_OFFS (0x0060)
#define BHIE_RXVECADDR_HIGH_OFFS (0x0064)
#define BHIE_RXVECSIZE_OFFS (0x0068)
#define BHIE_RXVECDB_OFFS (0x0070)
#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
#define BHIE_RXVECDB_SEQNUM_SHFT (0)
#define BHIE_RXVECSTATUS_OFFS (0x0078)
#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0)
#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000)
#define BHIE_RXVECSTATUS_STATUS_SHFT (30)
#define BHIE_RXVECSTATUS_STATUS_RESET (0x00)
#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02)
#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03)

#define SOC_HW_VERSION_OFFS (0x224)
#define SOC_HW_VERSION_FAM_NUM_BMSK (0xF0000000)
#define SOC_HW_VERSION_FAM_NUM_SHFT (28)
#define SOC_HW_VERSION_DEV_NUM_BMSK (0x0FFF0000)
#define SOC_HW_VERSION_DEV_NUM_SHFT (16)
#define SOC_HW_VERSION_MAJOR_VER_BMSK (0x0000FF00)
#define SOC_HW_VERSION_MAJOR_VER_SHFT (8)
#define SOC_HW_VERSION_MINOR_VER_BMSK (0x000000FF)
#define SOC_HW_VERSION_MINOR_VER_SHFT (0)

#define EV_CTX_RESERVED_MASK GENMASK(7, 0)
#define EV_CTX_INTMODC_MASK GENMASK(15, 8)
#define EV_CTX_INTMODC_SHIFT 8
#define EV_CTX_INTMODT_MASK GENMASK(31, 16)
#define EV_CTX_INTMODT_SHIFT 16
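/*
 * Context structures below are shared with the device: the 64-bit ring
 * pointers are packed and 4-byte aligned so the in-memory layout matches
 * what the device expects.
 */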
struct mhi_event_ctxt {
        __u32 intmod;
        __u32 ertype;
        __u32 msivec;

        __u64 rbase __packed __aligned(4);
        __u64 rlen __packed __aligned(4);
        __u64 rp __packed __aligned(4);
        __u64 wp __packed __aligned(4);
};

#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0)
#define CHAN_CTX_CHSTATE_SHIFT 0
#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8)
#define CHAN_CTX_BRSTMODE_SHIFT 8
#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10)
#define CHAN_CTX_POLLCFG_SHIFT 10
#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16)
struct mhi_chan_ctxt {
        __u32 chcfg;
        __u32 chtype;
        __u32 erindex;

        __u64 rbase __packed __aligned(4);
        __u64 rlen __packed __aligned(4);
        __u64 rp __packed __aligned(4);
        __u64 wp __packed __aligned(4);
};

struct mhi_cmd_ctxt {
        __u32 reserved0;
        __u32 reserved1;
        __u32 reserved2;

        __u64 rbase __packed __aligned(4);
        __u64 rlen __packed __aligned(4);
        __u64 rp __packed __aligned(4);
        __u64 wp __packed __aligned(4);
};

struct mhi_ctxt {
        struct mhi_event_ctxt *er_ctxt;
        struct mhi_chan_ctxt *chan_ctxt;
        struct mhi_cmd_ctxt *cmd_ctxt;
        dma_addr_t er_ctxt_addr;
        dma_addr_t chan_ctxt_addr;
        dma_addr_t cmd_ctxt_addr;
};

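/* Generic ring element: a 64-bit pointer plus two 32-bit control words */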
struct mhi_tre {
        u64 ptr;
        u32 dword[2];
};

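/* One segment of a BHIE vector table: DMA address and size of the segment */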
struct bhi_vec_entry {
        u64 dma_addr;
        u64 size;
};

enum mhi_cmd_type {
        MHI_CMD_NOP = 1,
        MHI_CMD_RESET_CHAN = 16,
        MHI_CMD_STOP_CHAN = 17,
        MHI_CMD_START_CHAN = 18,
};

/* No operation command */
#define MHI_TRE_CMD_NOOP_PTR (0)
#define MHI_TRE_CMD_NOOP_DWORD0 (0)
#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_NOP << 16)

/* Channel reset command */
#define MHI_TRE_CMD_RESET_PTR (0)
#define MHI_TRE_CMD_RESET_DWORD0 (0)
#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \
                                        (MHI_CMD_RESET_CHAN << 16))

/* Channel stop command */
#define MHI_TRE_CMD_STOP_PTR (0)
#define MHI_TRE_CMD_STOP_DWORD0 (0)
#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | \
                                       (MHI_CMD_STOP_CHAN << 16))

/* Channel start command */
#define MHI_TRE_CMD_START_PTR (0)
#define MHI_TRE_CMD_START_DWORD0 (0)
#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \
                                        (MHI_CMD_START_CHAN << 16))

#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)

/* Event descriptor macros */
#define MHI_TRE_EV_PTR(ptr) (ptr)
#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len)
#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16))
#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr)
#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF)
#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF)
#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF)
#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF)
#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0])
#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr)
#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr)
#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF)
#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF)
#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF)

/* Transfer descriptor macros */
#define MHI_TRE_DATA_PTR(ptr) (ptr)
#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU)
#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \
        | (ieot << 9) | (ieob << 8) | chain)

/* RSC transfer descriptor macros */
#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr)
#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie)
#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16)

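/* Ring element types, carried in bits 16-23 of dword[1] */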
enum mhi_pkt_type {
        MHI_PKT_TYPE_INVALID = 0x0,
        MHI_PKT_TYPE_NOOP_CMD = 0x1,
        MHI_PKT_TYPE_TRANSFER = 0x2,
        MHI_PKT_TYPE_COALESCING = 0x8,
        MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
        MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
        MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
        MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
        MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
        MHI_PKT_TYPE_TX_EVENT = 0x22,
        MHI_PKT_TYPE_RSC_TX_EVENT = 0x28,
        MHI_PKT_TYPE_EE_EVENT = 0x40,
        MHI_PKT_TYPE_TSYNC_EVENT = 0x48,
        MHI_PKT_TYPE_BW_REQ_EVENT = 0x50,
        MHI_PKT_TYPE_STALE_EVENT, /* internal event */
};

/* MHI transfer completion events */
enum mhi_ev_ccs {
        MHI_EV_CC_INVALID = 0x0,
        MHI_EV_CC_SUCCESS = 0x1,
        MHI_EV_CC_EOT = 0x2, /* End of transfer event */
        MHI_EV_CC_OVERFLOW = 0x3,
        MHI_EV_CC_EOB = 0x4, /* End of block event */
        MHI_EV_CC_OOB = 0x5, /* Out of block event */
        MHI_EV_CC_DB_MODE = 0x6,
        MHI_EV_CC_UNDEFINED_ERR = 0x10,
        MHI_EV_CC_BAD_TRE = 0x11,
};

enum mhi_ch_state {
        MHI_CH_STATE_DISABLED = 0x0,
        MHI_CH_STATE_ENABLED = 0x1,
        MHI_CH_STATE_RUNNING = 0x2,
        MHI_CH_STATE_SUSPENDED = 0x3,
        MHI_CH_STATE_STOP = 0x4,
        MHI_CH_STATE_ERROR = 0x5,
};

#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \
                                    mode != MHI_DB_BRST_ENABLE)

extern const char * const mhi_ee_str[MHI_EE_MAX];
#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
                             "INVALID_EE" : mhi_ee_str[ee])

#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
                        ee == MHI_EE_EDL)

#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW)

enum dev_st_transition {
        DEV_ST_TRANSITION_PBL,
        DEV_ST_TRANSITION_READY,
        DEV_ST_TRANSITION_SBL,
        DEV_ST_TRANSITION_MISSION_MODE,
        DEV_ST_TRANSITION_SYS_ERR,
        DEV_ST_TRANSITION_DISABLE,
        DEV_ST_TRANSITION_MAX,
};

extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX];
#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \
                                       "INVALID_STATE" : dev_state_tran_str[state])

extern const char * const mhi_state_str[MHI_STATE_MAX];
#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \
                                  !mhi_state_str[state]) ? \
                                 "INVALID_STATE" : mhi_state_str[state])

/* internal power states */
enum mhi_pm_state {
        MHI_PM_STATE_DISABLE,
        MHI_PM_STATE_POR,
        MHI_PM_STATE_M0,
        MHI_PM_STATE_M2,
        MHI_PM_STATE_M3_ENTER,
        MHI_PM_STATE_M3,
        MHI_PM_STATE_M3_EXIT,
        MHI_PM_STATE_FW_DL_ERR,
        MHI_PM_STATE_SYS_ERR_DETECT,
        MHI_PM_STATE_SYS_ERR_PROCESS,
        MHI_PM_STATE_SHUTDOWN_PROCESS,
        MHI_PM_STATE_LD_ERR_FATAL_DETECT,
        MHI_PM_STATE_MAX
};

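/* Bitmask encodings of the states above, as held in mhi_cntrl->pm_state */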
#define MHI_PM_DISABLE BIT(0)
#define MHI_PM_POR BIT(1)
#define MHI_PM_M0 BIT(2)
#define MHI_PM_M2 BIT(3)
#define MHI_PM_M3_ENTER BIT(4)
#define MHI_PM_M3 BIT(5)
#define MHI_PM_M3_EXIT BIT(6)
/* firmware download failure state */
#define MHI_PM_FW_DL_ERR BIT(7)
#define MHI_PM_SYS_ERR_DETECT BIT(8)
#define MHI_PM_SYS_ERR_PROCESS BIT(9)
#define MHI_PM_SHUTDOWN_PROCESS BIT(10)
/* link not accessible */
#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11)

#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
                                        MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
                                        MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
                                        MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \
                                        mhi_cntrl->db_access)
#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
                                           MHI_PM_M2 | MHI_PM_M3_EXIT))
#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2)
#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state)
#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \
                                            MHI_PM_IN_ERROR_STATE(pm_state))
#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \
                                           (MHI_PM_M3_ENTER | MHI_PM_M3))

#define NR_OF_CMD_RINGS 1
#define CMD_EL_PER_RING 128
#define PRIMARY_CMD_RING 0
#define MHI_DEV_WAKE_DB 127
#define MHI_MAX_MTU 0xffff
#define MHI_RANDOM_U32_NONZERO(bmsk) (prandom_u32_max(bmsk) + 1)

enum mhi_er_type {
        MHI_ER_TYPE_INVALID = 0x0,
        MHI_ER_TYPE_VALID = 0x1,
};

struct db_cfg {
        bool reset_req;
        bool db_mode;
        u32 pollcfg;
        enum mhi_db_brst_mode brstmode;
        dma_addr_t db_val;
        void (*process_db)(struct mhi_controller *mhi_cntrl,
                           struct db_cfg *db_cfg, void __iomem *io_addr,
                           dma_addr_t db_val);
};

struct mhi_pm_transitions {
        enum mhi_pm_state from_state;
        u32 to_states;
};

struct state_transition {
        struct list_head node;
        enum dev_st_transition state;
};

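/* General purpose ring used for transfer, event and command rings */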
struct mhi_ring {
        dma_addr_t dma_handle;
        dma_addr_t iommu_base;
        u64 *ctxt_wp; /* points to the write pointer in the shared context */
        void *pre_aligned;
        void *base;
        void *rp;
        void *wp;
        size_t el_size;
        size_t len;
        size_t elements;
        size_t alloc_size;
        void __iomem *db_addr;
};

struct mhi_cmd {
        struct mhi_ring ring;
        spinlock_t lock;
};

struct mhi_buf_info {
        void *v_addr;
        void *bb_addr;
        void *wp;
        void *cb_buf;
        dma_addr_t p_addr;
        size_t len;
        enum dma_data_direction dir;
        bool used; /* Indicates whether the buffer is used or not */
        bool pre_mapped; /* Already pre-mapped by client */
};

struct mhi_event {
        struct mhi_controller *mhi_cntrl;
        struct mhi_chan *mhi_chan; /* dedicated to channel */
        u32 er_index;
        u32 intmod;
        u32 irq;
        int chan; /* this event ring is dedicated to a channel (optional) */
        u32 priority;
        enum mhi_er_data_type data_type;
        struct mhi_ring ring;
        struct db_cfg db_cfg;
        struct tasklet_struct task;
        spinlock_t lock;
        int (*process_event)(struct mhi_controller *mhi_cntrl,
                             struct mhi_event *mhi_event,
                             u32 event_quota);
        bool hw_ring;
        bool cl_manage;
        bool offload_ev; /* managed by a device driver */
};

struct mhi_chan {
        const char *name;
        /*
         * Important: When consuming, increment tre_ring first and when
         * releasing, decrement buf_ring first. If tre_ring has space, buf_ring
         * is guaranteed to have space so we do not need to check both rings.
         */
        struct mhi_ring buf_ring;
        struct mhi_ring tre_ring;
        u32 chan;
        u32 er_index;
        u32 intmod;
        enum mhi_ch_type type;
        enum dma_data_direction dir;
        struct db_cfg db_cfg;
        enum mhi_ch_ee_mask ee_mask;
        enum mhi_ch_state ch_state;
        enum mhi_ev_ccs ccs;
        struct mhi_device *mhi_dev;
        void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result);
        struct mutex mutex;
        struct completion completion;
        rwlock_t lock;
        struct list_head node;
        bool lpm_notify;
        bool configured;
        bool offload_ch;
        bool pre_alloc;
        bool wake_capable;
};

/* Default MHI timeout */
#define MHI_TIMEOUT_MS (1000)

/* debugfs related functions */
#ifdef CONFIG_MHI_BUS_DEBUG
void mhi_create_debugfs(struct mhi_controller *mhi_cntrl);
void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl);
void mhi_debugfs_init(void);
void mhi_debugfs_exit(void);
#else
static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
{
}

static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl)
{
}

static inline void mhi_debugfs_init(void)
{
}

static inline void mhi_debugfs_exit(void)
{
}
#endif

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);

int mhi_destroy_device(struct device *dev, void *data);
void mhi_create_devices(struct mhi_controller *mhi_cntrl);

int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
                         struct image_info **image_info, size_t alloc_size);
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
                         struct image_info *image_info);

/* Power management APIs */
enum mhi_pm_state __must_check mhi_tryset_pm_state(
                                        struct mhi_controller *mhi_cntrl,
                                        enum mhi_pm_state state);
const char *to_mhi_pm_state_str(enum mhi_pm_state state);
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
                               enum dev_st_transition state);
void mhi_pm_st_worker(struct work_struct *work);
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
                 enum mhi_cmd_type cmd);

static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
{
        return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
                mhi_cntrl->dev_state <= MHI_STATE_M3_FAST);
}

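/* Nudge the controller out of suspend via a runtime PM get/put cycle */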
static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl)
{
        pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
        mhi_cntrl->runtime_get(mhi_cntrl);
        mhi_cntrl->runtime_put(mhi_cntrl);
}

/* Register access methods */
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg,
                     void __iomem *db_addr, dma_addr_t db_val);
void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
                             struct db_cfg *db_mode, void __iomem *db_addr,
                             dma_addr_t db_val);
int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
                              void __iomem *base, u32 offset, u32 *out);
int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
                                    void __iomem *base, u32 offset, u32 mask,
                                    u32 shift, u32 *out);
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
                   u32 offset, u32 val);
void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
                         u32 offset, u32 mask, u32 shift, u32 val);
void mhi_ring_er_db(struct mhi_event *mhi_event);
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
                  dma_addr_t db_val);
void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd);
void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
                      struct mhi_chan *mhi_chan);

/* Initialization methods */
int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
                      struct image_info *img_info);
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
                        struct mhi_chan *mhi_chan);
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
                       struct mhi_chan *mhi_chan);
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
                          struct mhi_chan *mhi_chan);
void mhi_reset_chan(struct mhi_controller *mhi_cntrl,
                    struct mhi_chan *mhi_chan);

/* Memory allocation methods */
static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl,
                                       size_t size,
                                       dma_addr_t *dma_handle,
                                       gfp_t gfp)
{
        void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, dma_handle,
                                       gfp);

        return buf;
}

static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl,
                                     size_t size,
                                     void *vaddr,
                                     dma_addr_t dma_handle)
{
        dma_free_coherent(mhi_cntrl->cntrl_dev, size, vaddr, dma_handle);
}

/* Event processing methods */
void mhi_ctrl_ev_task(unsigned long data);
void mhi_ev_task(unsigned long data);
int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
                                struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
                             struct mhi_event *mhi_event, u32 event_quota);

/* ISR handlers */
irqreturn_t mhi_irq_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_handler(int irq_number, void *dev);

int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
                struct mhi_buf_info *info, enum mhi_flags flags);
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
                         struct mhi_buf_info *buf_info);
int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
                          struct mhi_buf_info *buf_info);
void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
                            struct mhi_buf_info *buf_info);
void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
                             struct mhi_buf_info *buf_info);

#endif /* _MHI_INT_H */