// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"
21 /* Setup RDDM vector table for RDDM transfer and program RXVEC */
22 void mhi_rddm_prepare(struct mhi_controller
*mhi_cntrl
,
23 struct image_info
*img_info
)
25 struct mhi_buf
*mhi_buf
= img_info
->mhi_buf
;
26 struct bhi_vec_entry
*bhi_vec
= img_info
->bhi_vec
;
27 void __iomem
*base
= mhi_cntrl
->bhie
;
28 struct device
*dev
= &mhi_cntrl
->mhi_dev
->dev
;
32 for (i
= 0; i
< img_info
->entries
- 1; i
++, mhi_buf
++, bhi_vec
++) {
33 bhi_vec
->dma_addr
= mhi_buf
->dma_addr
;
34 bhi_vec
->size
= mhi_buf
->len
;
37 dev_dbg(dev
, "BHIe programming for RDDM\n");
39 mhi_write_reg(mhi_cntrl
, base
, BHIE_RXVECADDR_HIGH_OFFS
,
40 upper_32_bits(mhi_buf
->dma_addr
));
42 mhi_write_reg(mhi_cntrl
, base
, BHIE_RXVECADDR_LOW_OFFS
,
43 lower_32_bits(mhi_buf
->dma_addr
));
45 mhi_write_reg(mhi_cntrl
, base
, BHIE_RXVECSIZE_OFFS
, mhi_buf
->len
);
46 sequence_id
= MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK
);
48 mhi_write_reg_field(mhi_cntrl
, base
, BHIE_RXVECDB_OFFS
,
49 BHIE_RXVECDB_SEQNUM_BMSK
, BHIE_RXVECDB_SEQNUM_SHFT
,
52 dev_dbg(dev
, "Address: %p and len: 0x%zx sequence: %u\n",
53 &mhi_buf
->dma_addr
, mhi_buf
->len
, sequence_id
);
56 /* Collect RDDM buffer during kernel panic */
57 static int __mhi_download_rddm_in_panic(struct mhi_controller
*mhi_cntrl
)
62 const u32 delayus
= 2000;
63 u32 retry
= (mhi_cntrl
->timeout_ms
* 1000) / delayus
;
64 const u32 rddm_timeout_us
= 200000;
65 int rddm_retry
= rddm_timeout_us
/ delayus
;
66 void __iomem
*base
= mhi_cntrl
->bhie
;
67 struct device
*dev
= &mhi_cntrl
->mhi_dev
->dev
;
69 dev_dbg(dev
, "Entered with pm_state:%s dev_state:%s ee:%s\n",
70 to_mhi_pm_state_str(mhi_cntrl
->pm_state
),
71 TO_MHI_STATE_STR(mhi_cntrl
->dev_state
),
72 TO_MHI_EXEC_STR(mhi_cntrl
->ee
));
75 * This should only be executing during a kernel panic, we expect all
76 * other cores to shutdown while we're collecting RDDM buffer. After
77 * returning from this function, we expect the device to reset.
79 * Normaly, we read/write pm_state only after grabbing the
80 * pm_lock, since we're in a panic, skipping it. Also there is no
81 * gurantee that this state change would take effect since
82 * we're setting it w/o grabbing pm_lock
84 mhi_cntrl
->pm_state
= MHI_PM_LD_ERR_FATAL_DETECT
;
85 /* update should take the effect immediately */
89 * Make sure device is not already in RDDM. In case the device asserts
90 * and a kernel panic follows, device will already be in RDDM.
91 * Do not trigger SYS ERR again and proceed with waiting for
92 * image download completion.
94 ee
= mhi_get_exec_env(mhi_cntrl
);
98 if (ee
!= MHI_EE_RDDM
) {
99 dev_dbg(dev
, "Trigger device into RDDM mode using SYS ERR\n");
100 mhi_set_mhi_state(mhi_cntrl
, MHI_STATE_SYS_ERR
);
102 dev_dbg(dev
, "Waiting for device to enter RDDM\n");
103 while (rddm_retry
--) {
104 ee
= mhi_get_exec_env(mhi_cntrl
);
105 if (ee
== MHI_EE_RDDM
)
111 if (rddm_retry
<= 0) {
112 /* Hardware reset so force device to enter RDDM */
114 "Did not enter RDDM, do a host req reset\n");
115 mhi_write_reg(mhi_cntrl
, mhi_cntrl
->regs
,
116 MHI_SOC_RESET_REQ_OFFSET
,
121 ee
= mhi_get_exec_env(mhi_cntrl
);
125 "Waiting for RDDM image download via BHIe, current EE:%s\n",
126 TO_MHI_EXEC_STR(ee
));
129 ret
= mhi_read_reg_field(mhi_cntrl
, base
, BHIE_RXVECSTATUS_OFFS
,
130 BHIE_RXVECSTATUS_STATUS_BMSK
,
131 BHIE_RXVECSTATUS_STATUS_SHFT
,
136 if (rx_status
== BHIE_RXVECSTATUS_STATUS_XFER_COMPL
)
142 ee
= mhi_get_exec_env(mhi_cntrl
);
143 ret
= mhi_read_reg(mhi_cntrl
, base
, BHIE_RXVECSTATUS_OFFS
, &rx_status
);
145 dev_err(dev
, "RXVEC_STATUS: 0x%x\n", rx_status
);
148 dev_err(dev
, "RDDM transfer failed. Current EE: %s\n",
149 TO_MHI_EXEC_STR(ee
));
154 /* Download RDDM image from device */
155 int mhi_download_rddm_image(struct mhi_controller
*mhi_cntrl
, bool in_panic
)
157 void __iomem
*base
= mhi_cntrl
->bhie
;
158 struct device
*dev
= &mhi_cntrl
->mhi_dev
->dev
;
162 return __mhi_download_rddm_in_panic(mhi_cntrl
);
164 dev_dbg(dev
, "Waiting for RDDM image download via BHIe\n");
166 /* Wait for the image download to complete */
167 wait_event_timeout(mhi_cntrl
->state_event
,
168 mhi_read_reg_field(mhi_cntrl
, base
,
169 BHIE_RXVECSTATUS_OFFS
,
170 BHIE_RXVECSTATUS_STATUS_BMSK
,
171 BHIE_RXVECSTATUS_STATUS_SHFT
,
172 &rx_status
) || rx_status
,
173 msecs_to_jiffies(mhi_cntrl
->timeout_ms
));
175 return (rx_status
== BHIE_RXVECSTATUS_STATUS_XFER_COMPL
) ? 0 : -EIO
;
177 EXPORT_SYMBOL_GPL(mhi_download_rddm_image
);
179 static int mhi_fw_load_bhie(struct mhi_controller
*mhi_cntrl
,
180 const struct mhi_buf
*mhi_buf
)
182 void __iomem
*base
= mhi_cntrl
->bhie
;
183 struct device
*dev
= &mhi_cntrl
->mhi_dev
->dev
;
184 rwlock_t
*pm_lock
= &mhi_cntrl
->pm_lock
;
185 u32 tx_status
, sequence_id
;
188 read_lock_bh(pm_lock
);
189 if (!MHI_REG_ACCESS_VALID(mhi_cntrl
->pm_state
)) {
190 read_unlock_bh(pm_lock
);
194 sequence_id
= MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK
);
195 dev_dbg(dev
, "Starting image download via BHIe. Sequence ID: %u\n",
197 mhi_write_reg(mhi_cntrl
, base
, BHIE_TXVECADDR_HIGH_OFFS
,
198 upper_32_bits(mhi_buf
->dma_addr
));
200 mhi_write_reg(mhi_cntrl
, base
, BHIE_TXVECADDR_LOW_OFFS
,
201 lower_32_bits(mhi_buf
->dma_addr
));
203 mhi_write_reg(mhi_cntrl
, base
, BHIE_TXVECSIZE_OFFS
, mhi_buf
->len
);
205 mhi_write_reg_field(mhi_cntrl
, base
, BHIE_TXVECDB_OFFS
,
206 BHIE_TXVECDB_SEQNUM_BMSK
, BHIE_TXVECDB_SEQNUM_SHFT
,
208 read_unlock_bh(pm_lock
);
210 /* Wait for the image download to complete */
211 ret
= wait_event_timeout(mhi_cntrl
->state_event
,
212 MHI_PM_IN_ERROR_STATE(mhi_cntrl
->pm_state
) ||
213 mhi_read_reg_field(mhi_cntrl
, base
,
214 BHIE_TXVECSTATUS_OFFS
,
215 BHIE_TXVECSTATUS_STATUS_BMSK
,
216 BHIE_TXVECSTATUS_STATUS_SHFT
,
217 &tx_status
) || tx_status
,
218 msecs_to_jiffies(mhi_cntrl
->timeout_ms
));
219 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl
->pm_state
) ||
220 tx_status
!= BHIE_TXVECSTATUS_STATUS_XFER_COMPL
)
223 return (!ret
) ? -ETIMEDOUT
: 0;
226 static int mhi_fw_load_bhi(struct mhi_controller
*mhi_cntrl
,
230 u32 tx_status
, val
, session_id
;
232 void __iomem
*base
= mhi_cntrl
->bhi
;
233 rwlock_t
*pm_lock
= &mhi_cntrl
->pm_lock
;
234 struct device
*dev
= &mhi_cntrl
->mhi_dev
->dev
;
239 { "ERROR_CODE", BHI_ERRCODE
},
240 { "ERROR_DBG1", BHI_ERRDBG1
},
241 { "ERROR_DBG2", BHI_ERRDBG2
},
242 { "ERROR_DBG3", BHI_ERRDBG3
},
246 read_lock_bh(pm_lock
);
247 if (!MHI_REG_ACCESS_VALID(mhi_cntrl
->pm_state
)) {
248 read_unlock_bh(pm_lock
);
249 goto invalid_pm_state
;
252 session_id
= MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK
);
253 dev_dbg(dev
, "Starting image download via BHI. Session ID: %u\n",
255 mhi_write_reg(mhi_cntrl
, base
, BHI_STATUS
, 0);
256 mhi_write_reg(mhi_cntrl
, base
, BHI_IMGADDR_HIGH
,
257 upper_32_bits(dma_addr
));
258 mhi_write_reg(mhi_cntrl
, base
, BHI_IMGADDR_LOW
,
259 lower_32_bits(dma_addr
));
260 mhi_write_reg(mhi_cntrl
, base
, BHI_IMGSIZE
, size
);
261 mhi_write_reg(mhi_cntrl
, base
, BHI_IMGTXDB
, session_id
);
262 read_unlock_bh(pm_lock
);
264 /* Wait for the image download to complete */
265 ret
= wait_event_timeout(mhi_cntrl
->state_event
,
266 MHI_PM_IN_ERROR_STATE(mhi_cntrl
->pm_state
) ||
267 mhi_read_reg_field(mhi_cntrl
, base
, BHI_STATUS
,
268 BHI_STATUS_MASK
, BHI_STATUS_SHIFT
,
269 &tx_status
) || tx_status
,
270 msecs_to_jiffies(mhi_cntrl
->timeout_ms
));
271 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl
->pm_state
))
272 goto invalid_pm_state
;
274 if (tx_status
== BHI_STATUS_ERROR
) {
275 dev_err(dev
, "Image transfer failed\n");
276 read_lock_bh(pm_lock
);
277 if (MHI_REG_ACCESS_VALID(mhi_cntrl
->pm_state
)) {
278 for (i
= 0; error_reg
[i
].name
; i
++) {
279 ret
= mhi_read_reg(mhi_cntrl
, base
,
280 error_reg
[i
].offset
, &val
);
283 dev_err(dev
, "Reg: %s value: 0x%x\n",
284 error_reg
[i
].name
, val
);
287 read_unlock_bh(pm_lock
);
288 goto invalid_pm_state
;
291 return (!ret
) ? -ETIMEDOUT
: 0;
298 void mhi_free_bhie_table(struct mhi_controller
*mhi_cntrl
,
299 struct image_info
*image_info
)
302 struct mhi_buf
*mhi_buf
= image_info
->mhi_buf
;
304 for (i
= 0; i
< image_info
->entries
; i
++, mhi_buf
++)
305 mhi_free_coherent(mhi_cntrl
, mhi_buf
->len
, mhi_buf
->buf
,
308 kfree(image_info
->mhi_buf
);
312 int mhi_alloc_bhie_table(struct mhi_controller
*mhi_cntrl
,
313 struct image_info
**image_info
,
316 size_t seg_size
= mhi_cntrl
->seg_len
;
317 int segments
= DIV_ROUND_UP(alloc_size
, seg_size
) + 1;
319 struct image_info
*img_info
;
320 struct mhi_buf
*mhi_buf
;
322 img_info
= kzalloc(sizeof(*img_info
), GFP_KERNEL
);
326 /* Allocate memory for entries */
327 img_info
->mhi_buf
= kcalloc(segments
, sizeof(*img_info
->mhi_buf
),
329 if (!img_info
->mhi_buf
)
330 goto error_alloc_mhi_buf
;
332 /* Allocate and populate vector table */
333 mhi_buf
= img_info
->mhi_buf
;
334 for (i
= 0; i
< segments
; i
++, mhi_buf
++) {
335 size_t vec_size
= seg_size
;
337 /* Vector table is the last entry */
338 if (i
== segments
- 1)
339 vec_size
= sizeof(struct bhi_vec_entry
) * i
;
341 mhi_buf
->len
= vec_size
;
342 mhi_buf
->buf
= mhi_alloc_coherent(mhi_cntrl
, vec_size
,
346 goto error_alloc_segment
;
349 img_info
->bhi_vec
= img_info
->mhi_buf
[segments
- 1].buf
;
350 img_info
->entries
= segments
;
351 *image_info
= img_info
;
356 for (--i
, --mhi_buf
; i
>= 0; i
--, mhi_buf
--)
357 mhi_free_coherent(mhi_cntrl
, mhi_buf
->len
, mhi_buf
->buf
,
366 static void mhi_firmware_copy(struct mhi_controller
*mhi_cntrl
,
367 const struct firmware
*firmware
,
368 struct image_info
*img_info
)
370 size_t remainder
= firmware
->size
;
372 const u8
*buf
= firmware
->data
;
373 struct mhi_buf
*mhi_buf
= img_info
->mhi_buf
;
374 struct bhi_vec_entry
*bhi_vec
= img_info
->bhi_vec
;
377 to_cpy
= min(remainder
, mhi_buf
->len
);
378 memcpy(mhi_buf
->buf
, buf
, to_cpy
);
379 bhi_vec
->dma_addr
= mhi_buf
->dma_addr
;
380 bhi_vec
->size
= to_cpy
;
389 void mhi_fw_load_handler(struct mhi_controller
*mhi_cntrl
)
391 const struct firmware
*firmware
= NULL
;
392 struct image_info
*image_info
;
393 struct device
*dev
= &mhi_cntrl
->mhi_dev
->dev
;
400 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl
->pm_state
)) {
401 dev_err(dev
, "Device MHI is not in valid state\n");
405 /* save hardware info from BHI */
406 ret
= mhi_read_reg(mhi_cntrl
, mhi_cntrl
->bhi
, BHI_SERIALNU
,
407 &mhi_cntrl
->serial_number
);
409 dev_err(dev
, "Could not capture serial number via BHI\n");
411 for (i
= 0; i
< ARRAY_SIZE(mhi_cntrl
->oem_pk_hash
); i
++) {
412 ret
= mhi_read_reg(mhi_cntrl
, mhi_cntrl
->bhi
, BHI_OEMPKHASH(i
),
413 &mhi_cntrl
->oem_pk_hash
[i
]);
415 dev_err(dev
, "Could not capture OEM PK HASH via BHI\n");
420 /* If device is in pass through, do reset to ready state transition */
421 if (mhi_cntrl
->ee
== MHI_EE_PTHRU
)
422 goto fw_load_ee_pthru
;
424 fw_name
= (mhi_cntrl
->ee
== MHI_EE_EDL
) ?
425 mhi_cntrl
->edl_image
: mhi_cntrl
->fw_image
;
427 if (!fw_name
|| (mhi_cntrl
->fbc_download
&& (!mhi_cntrl
->sbl_size
||
428 !mhi_cntrl
->seg_len
))) {
430 "No firmware image defined or !sbl_size || !seg_len\n");
434 ret
= request_firmware(&firmware
, fw_name
, dev
);
436 dev_err(dev
, "Error loading firmware: %d\n", ret
);
440 size
= (mhi_cntrl
->fbc_download
) ? mhi_cntrl
->sbl_size
: firmware
->size
;
442 /* SBL size provided is maximum size, not necessarily the image size */
443 if (size
> firmware
->size
)
444 size
= firmware
->size
;
446 buf
= mhi_alloc_coherent(mhi_cntrl
, size
, &dma_addr
, GFP_KERNEL
);
448 release_firmware(firmware
);
452 /* Download image using BHI */
453 memcpy(buf
, firmware
->data
, size
);
454 ret
= mhi_fw_load_bhi(mhi_cntrl
, dma_addr
, size
);
455 mhi_free_coherent(mhi_cntrl
, size
, buf
, dma_addr
);
457 /* Error or in EDL mode, we're done */
459 dev_err(dev
, "MHI did not load image over BHI, ret: %d\n", ret
);
460 release_firmware(firmware
);
464 if (mhi_cntrl
->ee
== MHI_EE_EDL
) {
465 release_firmware(firmware
);
469 write_lock_irq(&mhi_cntrl
->pm_lock
);
470 mhi_cntrl
->dev_state
= MHI_STATE_RESET
;
471 write_unlock_irq(&mhi_cntrl
->pm_lock
);
474 * If we're doing fbc, populate vector tables while
475 * device transitioning into MHI READY state
477 if (mhi_cntrl
->fbc_download
) {
478 ret
= mhi_alloc_bhie_table(mhi_cntrl
, &mhi_cntrl
->fbc_image
,
481 release_firmware(firmware
);
485 /* Load the firmware into BHIE vec table */
486 mhi_firmware_copy(mhi_cntrl
, firmware
, mhi_cntrl
->fbc_image
);
489 release_firmware(firmware
);
492 /* Transitioning into MHI RESET->READY state */
493 ret
= mhi_ready_state_transition(mhi_cntrl
);
495 if (!mhi_cntrl
->fbc_download
)
499 dev_err(dev
, "MHI did not enter READY state\n");
500 goto error_ready_state
;
503 /* Wait for the SBL event */
504 ret
= wait_event_timeout(mhi_cntrl
->state_event
,
505 mhi_cntrl
->ee
== MHI_EE_SBL
||
506 MHI_PM_IN_ERROR_STATE(mhi_cntrl
->pm_state
),
507 msecs_to_jiffies(mhi_cntrl
->timeout_ms
));
509 if (!ret
|| MHI_PM_IN_ERROR_STATE(mhi_cntrl
->pm_state
)) {
510 dev_err(dev
, "MHI did not enter SBL\n");
511 goto error_ready_state
;
514 /* Start full firmware image download */
515 image_info
= mhi_cntrl
->fbc_image
;
516 ret
= mhi_fw_load_bhie(mhi_cntrl
,
517 /* Vector table is the last entry */
518 &image_info
->mhi_buf
[image_info
->entries
- 1]);
520 dev_err(dev
, "MHI did not load image over BHIe, ret: %d\n",
528 mhi_free_bhie_table(mhi_cntrl
, mhi_cntrl
->fbc_image
);
529 mhi_cntrl
->fbc_image
= NULL
;
532 mhi_cntrl
->pm_state
= MHI_PM_FW_DL_ERR
;
533 wake_up_all(&mhi_cntrl
->state_event
);