// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"

static DEFINE_IDA(mhi_controller_ida);

const char * const mhi_ee_str[MHI_EE_MAX] = {
	[MHI_EE_AMSS] = "AMSS",
	[MHI_EE_RDDM] = "RDDM",
	[MHI_EE_PTHRU] = "PASS THRU",
	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};

const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
	[DEV_ST_TRANSITION_PBL] = "PBL",
	[DEV_ST_TRANSITION_READY] = "READY",
	[DEV_ST_TRANSITION_SBL] = "SBL",
	[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
	[DEV_ST_TRANSITION_SYS_ERR] = "SYS_ERR",
	[DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};

const char * const mhi_state_str[MHI_STATE_MAX] = {
	[MHI_STATE_RESET] = "RESET",
	[MHI_STATE_READY] = "READY",
	[MHI_STATE_M0] = "M0",
	[MHI_STATE_M1] = "M1",
	[MHI_STATE_M2] = "M2",
	[MHI_STATE_M3] = "M3",
	[MHI_STATE_M3_FAST] = "M3_FAST",
	[MHI_STATE_BHI] = "BHI",
	[MHI_STATE_SYS_ERR] = "SYS_ERR",
};

static const char * const mhi_pm_state_str[] = {
	[MHI_PM_STATE_DISABLE] = "DISABLE",
	[MHI_PM_STATE_POR] = "POR",
	[MHI_PM_STATE_M0] = "M0",
	[MHI_PM_STATE_M2] = "M2",
	[MHI_PM_STATE_M3_ENTER] = "M?->M3",
	[MHI_PM_STATE_M3] = "M3",
	[MHI_PM_STATE_M3_EXIT] = "M3->M0",
	[MHI_PM_STATE_FW_DL_ERR] = "FW DL Error",
	[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS_ERR Detect",
	[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS_ERR Process",
	[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
	[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
};

const char *to_mhi_pm_state_str(enum mhi_pm_state state)
{
	int index = find_last_bit((unsigned long *)&state, 32);

	if (index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}

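/*
 * Each MHI_PM_* value is a single-bit mask, so find_last_bit() above recovers
 * the bit position of the current state and uses it to index the sequential
 * mhi_pm_state_str[] name table.
 */
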
static ssize_t serial_number_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n",
			mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);

static ssize_t oem_pk_hash_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int i, cnt = 0;

	for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
				"OEMPKHASH[%d]: 0x%x\n", i,
				mhi_cntrl->oem_pk_hash[i]);

	return cnt;
}
static DEVICE_ATTR_RO(oem_pk_hash);

static struct attribute *mhi_dev_attrs[] = {
	&dev_attr_serial_number.attr,
	&dev_attr_oem_pk_hash.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mhi_dev);

/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}

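/*
 * The alignment trick above: the ring is over-allocated by (len - 1) bytes so
 * that a len-aligned address is guaranteed to fall inside the buffer. For a
 * power-of-two len, e.g. len = 0x1000 and dma_handle = 0x8010, iommu_base
 * rounds up to 0x9000 and base points at the matching offset (0xff0) inside
 * pre_aligned.
 */
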
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}

int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   IRQF_SHARED | IRQF_NO_SUSPEND,
				   "bhi", mhi_cntrl);
	if (ret)
		return ret;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
			dev_err(dev, "irq %d not available for event ring\n",
				mhi_event->irq);
			ret = -EINVAL;
			goto error_request;
		}

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler,
				  IRQF_SHARED | IRQF_NO_SUSPEND,
				  "mhi", mhi_event);
		if (ret) {
			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}
	}

	return 0;

error_request:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}

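/*
 * On failure the error path above unwinds only the vectors requested so far:
 * i and mhi_event walk backwards from the failing entry, skipping offload
 * event rings, before the BHI vector (irq[0]) is released.
 */
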
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event *mhi_event;
	struct mhi_ring *ring;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
		ring = &mhi_cmd->ring;
		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	mhi_free_coherent(mhi_cntrl,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ring = &mhi_event->ring;
		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

	kfree(mhi_ctxt);
	mhi_cntrl->mhi_ctxt = NULL;
}

int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	u32 tmp;
	int ret = -ENOMEM, i;

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
	if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
	mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
						 sizeof(*mhi_ctxt->chan_ctxt) *
						 mhi_cntrl->max_chan,
						 &mhi_ctxt->chan_ctxt_addr,
						 GFP_KERNEL);
	if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;

		tmp = chan_ctxt->chcfg;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
		tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT);
		tmp &= ~CHAN_CTX_POLLCFG_MASK;
		tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT);
		chan_ctxt->chcfg = tmp;

		chan_ctxt->chtype = mhi_chan->type;
		chan_ctxt->erindex = mhi_chan->er_index;

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}

	/* Setup event context */
	mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
					       sizeof(*mhi_ctxt->er_ctxt) *
					       mhi_cntrl->total_ev_rings,
					       &mhi_ctxt->er_ctxt_addr,
					       GFP_KERNEL);
	if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if it is an offload event */
		if (mhi_event->offload_ev)
			continue;

		tmp = er_ctxt->intmod;
		tmp &= ~EV_CTX_INTMODC_MASK;
		tmp &= ~EV_CTX_INTMODT_MASK;
		tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT);
		er_ctxt->intmod = tmp;

		er_ctxt->ertype = MHI_ER_TYPE_VALID;
		er_ctxt->msivec = mhi_event->irq;
		mhi_event->db_cfg.db_mode = true;

		ring->el_size = sizeof(struct mhi_tre);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals to the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = ring->iommu_base;
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = ring->len;
		ring->ctxt_wp = &er_ctxt->wp;
	}

	/* Setup cmd context */
	ret = -ENOMEM;
	mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->el_size = sizeof(struct mhi_tre);
		ring->elements = CMD_EL_PER_RING;
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_cmd;

		ring->rp = ring->wp = ring->base;
		cmd_ctxt->rbase = ring->iommu_base;
		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
		cmd_ctxt->rlen = ring->len;
		ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;

error_alloc_cmd:
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	mhi_free_coherent(mhi_cntrl,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}

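/*
 * The channel, event and command context arrays allocated above live in
 * coherent DMA memory; their bus addresses (*_ctxt_addr) are what
 * mhi_init_mmio() later programs into the CCABAP/ECABAP/CRCBAP registers so
 * the device parses the same structures the host fills in here.
 */
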
int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
	int i, ret;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	void __iomem *base = mhi_cntrl->regs;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		u32 offset;
		u32 mask;
		u32 shift;
		u32 val;
	} reg_info[] = {
		{ CCABAP_HIGHER, U32_MAX, 0,
		  upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr) },
		{ CCABAP_LOWER, U32_MAX, 0,
		  lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr) },
		{ ECABAP_HIGHER, U32_MAX, 0,
		  upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr) },
		{ ECABAP_LOWER, U32_MAX, 0,
		  lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr) },
		{ CRCBAP_HIGHER, U32_MAX, 0,
		  upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr) },
		{ CRCBAP_LOWER, U32_MAX, 0,
		  lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr) },
		{ MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT,
		  mhi_cntrl->total_ev_rings },
		{ MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT,
		  mhi_cntrl->hw_ev_rings },
		{ MHICTRLBASE_HIGHER, U32_MAX, 0,
		  upper_32_bits(mhi_cntrl->iova_start) },
		{ MHICTRLBASE_LOWER, U32_MAX, 0,
		  lower_32_bits(mhi_cntrl->iova_start) },
		{ MHIDATABASE_HIGHER, U32_MAX, 0,
		  upper_32_bits(mhi_cntrl->iova_start) },
		{ MHIDATABASE_LOWER, U32_MAX, 0,
		  lower_32_bits(mhi_cntrl->iova_start) },
		{ MHICTRLLIMIT_HIGHER, U32_MAX, 0,
		  upper_32_bits(mhi_cntrl->iova_stop) },
		{ MHICTRLLIMIT_LOWER, U32_MAX, 0,
		  lower_32_bits(mhi_cntrl->iova_stop) },
		{ MHIDATALIMIT_HIGHER, U32_MAX, 0,
		  upper_32_bits(mhi_cntrl->iova_stop) },
		{ MHIDATALIMIT_LOWER, U32_MAX, 0,
		  lower_32_bits(mhi_cntrl->iova_stop) },
		{ 0, 0, 0 }
	};

	dev_dbg(dev, "Initializing MHI registers\n");

	/* Read channel db offset */
	ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK,
				 CHDBOFF_CHDBOFF_SHIFT, &val);
	if (ret) {
		dev_err(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	/* Setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
	mhi_cntrl->wake_set = false;

	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;

	/* Read event ring db offset */
	ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK,
				 ERDBOFF_ERDBOFF_SHIFT, &val);
	if (ret) {
		dev_err(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	/* Write to MMIO registers */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
				    reg_info[i].mask, reg_info[i].shift,
				    reg_info[i].val);

	return 0;
}

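/*
 * reg_info[] is terminated by an entry with a zero offset, which is what the
 * "Write to MMIO registers" loop above tests for; every other entry is a
 * (register, mask, shift, value) tuple written with mhi_write_reg_field().
 */
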
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

	mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
			  tre_ring->pre_aligned, tre_ring->dma_handle);
	vfree(buf_ring->base);

	buf_ring->base = tre_ring->base = NULL;
	chan_ctxt->rbase = 0;
}

int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	tre_ring->el_size = sizeof(struct mhi_tre);
	tre_ring->len = tre_ring->el_size * tre_ring->elements;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
	if (ret)
		return -ENOMEM;

	buf_ring->el_size = sizeof(struct mhi_buf_info);
	buf_ring->len = buf_ring->el_size * buf_ring->elements;
	buf_ring->base = vzalloc(buf_ring->len);

	if (!buf_ring->base) {
		mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
				  tre_ring->pre_aligned, tre_ring->dma_handle);
		return -ENOMEM;
	}

	tmp = chan_ctxt->chcfg;
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT);
	chan_ctxt->chcfg = tmp;

	chan_ctxt->rbase = tre_ring->iommu_base;
	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
	chan_ctxt->rlen = tre_ring->len;
	tre_ring->ctxt_wp = &chan_ctxt->wp;

	tre_ring->rp = tre_ring->wp = tre_ring->base;
	buf_ring->rp = buf_ring->wp = buf_ring->base;
	mhi_chan->db_cfg.db_mode = 1;

	/* Update to all cores */
	smp_wmb();

	return 0;
}

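/*
 * Note the asymmetry between the two rings above: tre_ring is device-visible
 * and comes from mhi_alloc_aligned_ring() (coherent DMA memory), while
 * buf_ring is host-only bookkeeping allocated with vzalloc().
 * mhi_deinit_chan_ctxt() mirrors this with mhi_free_coherent() and vfree()
 * respectively.
 */
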
static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	const struct mhi_event_config *event_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i, num;

	num = config->num_events;
	mhi_cntrl->total_ev_rings = num;
	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Populate event ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < num; i++) {
		event_cfg = &config->event_cfg[i];

		mhi_event->er_index = i;
		mhi_event->ring.elements = event_cfg->num_elements;
		mhi_event->intmod = event_cfg->irq_moderation_ms;
		mhi_event->irq = event_cfg->irq;

		if (event_cfg->channel != U32_MAX) {
			/* This event ring has a dedicated channel */
			mhi_event->chan = event_cfg->channel;
			if (mhi_event->chan >= mhi_cntrl->max_chan) {
				dev_err(dev,
					"Event Ring channel not available\n");
				goto error_ev_cfg;
			}

			mhi_event->mhi_chan =
				&mhi_cntrl->mhi_chan[mhi_event->chan];
		}

		/* Priority is fixed to 1 for now */
		mhi_event->priority = 1;

		mhi_event->db_cfg.brstmode = event_cfg->mode;
		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
			goto error_ev_cfg;

		if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_event->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_event->data_type = event_cfg->data_type;

		switch (mhi_event->data_type) {
		case MHI_ER_DATA:
			mhi_event->process_event = mhi_process_data_event_ring;
			break;
		case MHI_ER_CTRL:
			mhi_event->process_event = mhi_process_ctrl_ev_ring;
			break;
		default:
			dev_err(dev, "Event Ring type not supported\n");
			goto error_ev_cfg;
		}

		mhi_event->hw_ring = event_cfg->hardware_event;
		if (mhi_event->hw_ring)
			mhi_cntrl->hw_ev_rings++;
		else
			mhi_cntrl->sw_ev_rings++;

		mhi_event->cl_manage = event_cfg->client_managed;
		mhi_event->offload_ev = event_cfg->offload_channel;
		mhi_event++;
	}

	return 0;

error_ev_cfg:

	kfree(mhi_cntrl->mhi_event);
	return -EINVAL;
}

static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	const struct mhi_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i;
	u32 chan;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so to avoid any memory possible allocation failures, vzalloc is
	 * used here
	 */
	mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
				      sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;

		/*
		 * For some channels, local ring length should be bigger than
		 * the transfer ring length due to internal logical channels
		 * in device. So host can queue much more buffers than transfer
		 * ring length. Example, RSC channels should have a larger local
		 * channel length than transfer ring length.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, chtype is identical to channel directions.
		 * So, if it is not defined then assign channel direction to
		 * chtype
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

		mhi_chan->ee_mask = ch_cfg->ee_mask;
		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
		mhi_chan->offload_ch = ch_cfg->offload_channel;
		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
		mhi_chan->pre_alloc = ch_cfg->auto_queue;

		/*
		 * If MHI host allocates buffers, then the channel direction
		 * should be DMA_FROM_DEVICE
		 */
		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		/*
		 * Bi-directional and direction less channel must be an
		 * offload channel
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				dev_err(dev, "Invalid Door bell mode\n");
				goto error_chan_cfg;
			}
		}

		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_chan->configured = true;

		if (mhi_chan->lpm_notify)
			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
	}

	return 0;

error_chan_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return -EINVAL;
}

static int parse_config(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	int ret;

	/* Parse MHI channel configuration */
	ret = parse_ch_cfg(mhi_cntrl, config);
	if (ret)
		return ret;

	/* Parse MHI event configuration */
	ret = parse_ev_cfg(mhi_cntrl, config);
	if (ret)
		goto error_ev_cfg;

	mhi_cntrl->timeout_ms = config->timeout_ms;
	if (!mhi_cntrl->timeout_ms)
		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

	mhi_cntrl->bounce_buf = config->use_bounce_buf;
	mhi_cntrl->buffer_len = config->buf_len;
	if (!mhi_cntrl->buffer_len)
		mhi_cntrl->buffer_len = MHI_MAX_MTU;

	/* By default, host is allowed to ring DB in both M0 and M2 states */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (config->m2_no_db)
		mhi_cntrl->db_access &= ~MHI_PM_M2;

	return 0;

error_ev_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}

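/*
 * Illustrative sketch (not taken from any real controller driver) of the kind
 * of table parse_config() consumes; the channel name, indexes and sizes below
 * are assumptions made up for the example:
 *
 *	static const struct mhi_channel_config example_channels[] = {
 *		{
 *			.num = 0,
 *			.name = "LOOPBACK",
 *			.num_elements = 32,
 *			.event_ring = 0,
 *			.dir = DMA_TO_DEVICE,
 *			.ee_mask = BIT(MHI_EE_AMSS),
 *			.doorbell = MHI_DB_BRST_DISABLE,
 *		},
 *	};
 *
 *	static const struct mhi_event_config example_events[] = {
 *		{
 *			.num_elements = 32,
 *			.irq_moderation_ms = 1,
 *			.irq = 1,
 *			.mode = MHI_DB_BRST_DISABLE,
 *			.data_type = MHI_ER_DATA,
 *			.channel = U32_MAX,
 *		},
 *	};
 *
 *	static const struct mhi_controller_config example_config = {
 *		.max_channels = 128,
 *		.timeout_ms = 2000,
 *		.num_channels = ARRAY_SIZE(example_channels),
 *		.ch_cfg = example_channels,
 *		.num_events = ARRAY_SIZE(example_events),
 *		.event_cfg = example_events,
 *	};
 */
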
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_chan *mhi_chan;
	struct mhi_cmd *mhi_cmd;
	struct mhi_device *mhi_dev;
	u32 soc_info;
	int ret, i;

	if (!mhi_cntrl)
		return -EINVAL;

	if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
	    !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs)
		return -EINVAL;

	ret = parse_config(mhi_cntrl, config);
	if (ret)
		return -EINVAL;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_event;
	}

	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
	mutex_init(&mhi_cntrl->pm_mutex);
	rwlock_init(&mhi_cntrl->pm_lock);
	spin_lock_init(&mhi_cntrl->transition_lock);
	spin_lock_init(&mhi_cntrl->wlock);
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue
				("mhi_hiprio_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI);
	if (!mhi_cntrl->hiprio_wq) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Skip for offload events */
		if (mhi_event->offload_ev)
			continue;

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);
		if (mhi_event->data_type == MHI_ER_CTRL)
			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
				     (ulong)mhi_event);
		else
			tasklet_init(&mhi_event->task, mhi_ev_task,
				     (ulong)mhi_event);
	}

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		mutex_init(&mhi_chan->mutex);
		init_completion(&mhi_chan->completion);
		rwlock_init(&mhi_chan->lock);

		/* used in setting bei field of TRE */
		mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
		mhi_chan->intmod = mhi_event->intmod;
	}

	if (mhi_cntrl->bounce_buf) {
		mhi_cntrl->map_single = mhi_map_single_use_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
	} else {
		mhi_cntrl->map_single = mhi_map_single_no_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
	}

	/* Read the MHI device info */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
			   SOC_HW_VERSION_OFFS, &soc_info);
	if (ret)
		goto err_destroy_wq;

	mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
					SOC_HW_VERSION_FAM_NUM_SHFT;
	mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
					SOC_HW_VERSION_DEV_NUM_SHFT;
	mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
					SOC_HW_VERSION_MAJOR_VER_SHFT;
	mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
					SOC_HW_VERSION_MINOR_VER_SHFT;

	mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
	if (mhi_cntrl->index < 0) {
		ret = mhi_cntrl->index;
		goto err_destroy_wq;
	}

	/* Register controller with MHI bus */
	mhi_dev = mhi_alloc_device(mhi_cntrl);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
		ret = PTR_ERR(mhi_dev);
		goto err_ida_free;
	}

	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
	mhi_dev->name = dev_name(&mhi_dev->dev);

	/* Init wakeup source */
	device_init_wakeup(&mhi_dev->dev, true);

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_release_dev;

	mhi_cntrl->mhi_dev = mhi_dev;

	mhi_create_debugfs(mhi_cntrl);

	return 0;

err_release_dev:
	put_device(&mhi_dev->dev);
err_ida_free:
	ida_free(&mhi_controller_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->hiprio_wq);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_event:
	kfree(mhi_cntrl->mhi_event);
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);

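/*
 * Typical controller driver flow (a sketch; the power-up helpers live outside
 * this file): mhi_alloc_controller(), fill in regs/irq/callbacks, then
 * mhi_register_controller(), mhi_prepare_for_power_up() and finally
 * mhi_async_power_up() or mhi_sync_power_up().
 */
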
void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
	unsigned int i;

	mhi_destroy_debugfs(mhi_cntrl);

	destroy_workqueue(mhi_cntrl->hiprio_wq);
	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_event);

	/* Drop the references to MHI devices created for channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->mhi_dev)
			continue;

		put_device(&mhi_chan->mhi_dev->dev);
	}
	vfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_controller_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);

struct mhi_controller *mhi_alloc_controller(void)
{
	struct mhi_controller *mhi_cntrl;

	mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);

	return mhi_cntrl;
}
EXPORT_SYMBOL_GPL(mhi_alloc_controller);

void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
	kfree(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_free_controller);

int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhie_off;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	/*
	 * Allocate RDDM table if specified, this table is for debugging purpose
	 */
	if (mhi_cntrl->rddm_size) {
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);

		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			dev_err(dev, "Error getting BHIE offset\n");
			goto bhie_error;
		}

		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
			  4);

		if (mhi_cntrl->rddm_image)
			mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
	}

	mhi_cntrl->pre_init = true;

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return 0;

bhie_error:
	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);

void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->fbc_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

	mhi_deinit_dev_ctxt(mhi_cntrl);
	mhi_cntrl->pre_init = false;
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);

static void mhi_release_device(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created if the mhi_dev
	 * associated with it is NULL. This scenario will happen during the
	 * controller suspend and resume.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_bus_type;
	dev->release = mhi_release_device;

	if (mhi_cntrl->mhi_dev) {
		/* for MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;
	} else {
		/* for MHI controller device, parent is the bus device (e.g. pci device) */
		dev->parent = mhi_cntrl->cntrl_dev;
	}

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_wake = 0;

	return mhi_dev;
}

static int mhi_driver_probe(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_driver *drv = dev->driver;
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	struct mhi_event *mhi_event;
	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
	int ret;

	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If channel supports LPM notifications then status_cb should
		 * be provided
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels then xfer_cb should be provided */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	}

	ret = -EINVAL;
	if (dl_chan) {
		/*
		 * If channel supports LPM notifications then status_cb should
		 * be provided
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels then xfer_cb should be provided */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

		/*
		 * If the channel event ring is managed by client, then
		 * status_cb must be provided so that the framework can
		 * notify pending data
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}

	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	mhi_device_put(mhi_dev);

	return ret;

exit_probe:
	mhi_unprepare_from_transfer(mhi_dev);

	mhi_device_put(mhi_dev);

	return ret;
}

static int mhi_driver_remove(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	enum mhi_ch_state ch_state[] = {
		MHI_CH_STATE_DISABLED,
		MHI_CH_STATE_DISABLED
	};
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Set the channel state to disabled */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}

	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->mutex);

		if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
		    !mhi_chan->offload_ch)
			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

		mutex_unlock(&mhi_chan->mutex);
	}

	while (mhi_dev->dev_wake)
		mhi_device_put(mhi_dev);

	return 0;
}

int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	driver->bus = &mhi_bus_type;
	driver->owner = owner;
	driver->probe = mhi_driver_probe;
	driver->remove = mhi_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);

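/*
 * A client driver registers against the channel names it serves, e.g. (an
 * illustrative skeleton, not a real driver; the example_* symbols are
 * placeholders):
 *
 *	static const struct mhi_device_id example_id_table[] = {
 *		{ .chan = "LOOPBACK" },
 *		{},
 *	};
 *
 *	static struct mhi_driver example_driver = {
 *		.id_table = example_id_table,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.ul_xfer_cb = example_ul_cb,
 *		.dl_xfer_cb = example_dl_cb,
 *		.driver = {
 *			.name = "example_mhi_client",
 *		},
 *	};
 *	module_mhi_driver(example_driver);
 */
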
void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);

static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
			      mhi_dev->name);
}

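/*
 * The modalias is built from the MHI device name (the channel name for client
 * devices), so udev can autoload the client driver that lists that name in
 * its id_table (see mhi_match() below).
 */
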
static int mhi_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}

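/*
 * The loop above relies on the client driver terminating its id_table with an
 * empty (zero-filled) entry, which is what the id->chan[0] test detects.
 */
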
struct bus_type mhi_bus_type = {
	.name = "mhi",
	.dev_name = "mhi",
	.match = mhi_match,
	.uevent = mhi_uevent,
	.dev_groups = mhi_dev_groups,
};

static int __init mhi_init(void)
{
	mhi_debugfs_init();
	return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
	mhi_debugfs_exit();
	bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Host Interface");