/*
 * Qualcomm Peripheral Image Loader
 *
 * Copyright (C) 2016 Linaro Ltd.
 * Copyright (C) 2014 Sony Mobile Communications AB
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/iopoll.h>

#include "remoteproc_internal.h"
#include "qcom_common.h"

#include <linux/qcom_scm.h>
#define MPSS_CRASH_REASON_SMEM		421

/* RMB Status Register Values */
#define RMB_PBL_SUCCESS			0x1

#define RMB_MBA_XPU_UNLOCKED		0x1
#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
#define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
#define RMB_MBA_AUTH_COMPLETE		0x4

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE_REG		0x00
#define RMB_PBL_STATUS_REG		0x04
#define RMB_MBA_COMMAND_REG		0x08
#define RMB_MBA_STATUS_REG		0x0C
#define RMB_PMI_META_DATA_REG		0x10
#define RMB_PMI_CODE_START_REG		0x14
#define RMB_PMI_CODE_LENGTH_REG		0x18

#define RMB_CMD_META_DATA_READY		0x1
#define RMB_CMD_LOAD_READY		0x2
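
/*
 * The RMB registers above form a small mailbox between this driver and the
 * PBL/MBA firmware: the MBA image address is written to RMB_MBA_IMAGE_REG,
 * RMB_PBL_STATUS_REG is polled for RMB_PBL_SUCCESS, and metadata/segment
 * authentication is then driven through RMB_MBA_COMMAND_REG while
 * RMB_MBA_STATUS_REG reports progress.
 */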
/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET_REG		0x014
#define QDSP6SS_GFMUX_CTL_REG		0x020
#define QDSP6SS_PWR_CTL_REG		0x030
#define QDSP6SS_MEM_PWR_CTL		0x0B0
#define QDSP6SS_STRAP_ACC		0x110

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8

#define HALT_ACK_TIMEOUT_MS		100

#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)

/* QDSP6v56 parameters */
#define QDSP6v56_LDO_BYP		BIT(25)
#define QDSP6v56_BHS_ON			BIT(24)
#define QDSP6v56_CLAMP_WL		BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
#define HALT_CHECK_MAX_LOOPS		200
#define QDSP6SS_XO_CBCR			0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL	0x20
struct reg_info {
	struct regulator *reg;
	int uV;
	int uA;
};

struct qcom_mss_reg_res {
	const char *supply;
	int uV;
	int uA;
};

struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;
	char **active_clk_names;
	int version;
	bool need_mem_protection;
};
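
/*
 * struct q6v5 below is the per-instance driver state: register mappings,
 * clocks, regulators, reset and SMEM state handles, the reserved MBA and
 * MPSS memory regions, and the completions signalled from the IRQ handlers.
 */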
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;
	void __iomem *rmb_base;

	struct regmap *halt_map;
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;

	struct reset_control *mss_restart;

	struct qcom_smem_state *state;
	unsigned stop_bit;

	struct clk *active_clks[8];
	struct clk *proxy_clks[4];
	int active_clk_count;
	int proxy_clk_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	struct completion start_done;
	struct completion stop_done;
	bool running;

	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;

	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	void *mpss_region;
	size_t mpss_size;
	int mpss_perm;
	int mba_perm;

	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	bool need_mem_protection;
	int version;
};

enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
};
static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
			       const struct qcom_mss_reg_res *reg_res)
{
	int rc;
	int i;

	if (!reg_res)
		return 0;

	for (i = 0; reg_res[i].supply; i++) {
		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
		if (IS_ERR(regs[i].reg)) {
			rc = PTR_ERR(regs[i].reg);
			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s regulator\n",
					reg_res[i].supply);
			return rc;
		}

		regs[i].uV = reg_res[i].uV;
		regs[i].uA = reg_res[i].uA;
	}

	return i;
}
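
/*
 * Note: on success the number of regulators found is returned, which probe
 * stores as the corresponding *_reg_count; q6v5_init_clocks() follows the
 * same convention for clocks.
 */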
static int q6v5_regulator_enable(struct q6v5 *qproc,
				 struct reg_info *regs, int count)
{
	int ret;
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0) {
			ret = regulator_set_voltage(regs[i].reg,
					regs[i].uV, INT_MAX);
			if (ret) {
				dev_err(qproc->dev,
					"Failed to request voltage for %d.\n",
					i);
				goto err;
			}
		}

		if (regs[i].uA > 0) {
			ret = regulator_set_load(regs[i].reg,
						 regs[i].uA);
			if (ret < 0) {
				dev_err(qproc->dev,
					"Failed to set regulator mode\n");
				goto err;
			}
		}

		ret = regulator_enable(regs[i].reg);
		if (ret) {
			dev_err(qproc->dev, "Regulator enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	/* Unwind in reverse order, dropping the voltage and load requests */
	for (; i >= 0; i--) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}

	return ret;
}
static void q6v5_regulator_disable(struct q6v5 *qproc,
				   struct reg_info *regs, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}
}
static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int rc;
	int i;

	for (i = 0; i < count; i++) {
		rc = clk_prepare_enable(clks[i]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	for (i--; i >= 0; i--)
		clk_disable_unprepare(clks[i]);

	return rc;
}
static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int i;

	for (i = 0; i < count; i++)
		clk_disable_unprepare(clks[i]);
}
static struct resource_table *q6v5_find_rsc_table(struct rproc *rproc,
						  const struct firmware *fw,
						  int *tablesz)
{
	static struct resource_table table = { .ver = 1, };

	*tablesz = sizeof(table);

	return &table;
}
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
				   bool remote_owner, phys_addr_t addr,
				   size_t size)
{
	struct qcom_scm_vmperm next;

	if (!qproc->need_mem_protection)
		return 0;

	if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
		return 0;

	if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
		return 0;

	next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
	next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;

	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
				   current_perm, &next, 1);
}
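
/*
 * When memory protection is enabled, qcom_scm_assign_mem() asks the
 * hypervisor to reassign the given region between the HLOS VM and the
 * modem's MSA VM; *current_perm tracks the current assignment so redundant
 * SCM calls are skipped.
 */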
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
	struct q6v5 *qproc = rproc->priv;

	/* Stage the MBA firmware image in its reserved DDR region */
	memcpy(qproc->mba_region, fw->data, fw->size);

	return 0;
}
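
/*
 * The modem firmware carries no resource table, so q6v5_find_rsc_table()
 * hands the remoteproc core a dummy table, and q6v5_load() simply copies the
 * MBA image into its reserved region instead of using the default ELF loader.
 */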
static const struct rproc_fw_ops q6v5_fw_ops = {
	.find_rsc_table = q6v5_find_rsc_table,
	.load = q6v5_load,
};
static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
		if (val)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}
static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (val < 0)
			break;

		if (!status && val)
			break;
		else if (status && val == status)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}
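
/*
 * q6v5proc_reset() below brings the Hexagon core out of reset. The MSM8996
 * (QDSP6v56) path powers the memories up bank by bank via QDSP6SS_MEM_PWR_CTL,
 * while the older path sequences the L2 banks through QDSP6SS_PWR_CTL_REG;
 * both end by releasing Q6SS_STOP_CORE and waiting for RMB_PBL_SUCCESS.
 */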
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_MSM8996) {
		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= 0x1;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & BIT(31)), 1,
					 HALT_CHECK_MAX_LOOPS);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
		for (i = 19; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base +
						QDSP6SS_MEM_PWR_CTL);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
			udelay(1);
		}

		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}
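
/*
 * AXI halt handshake used on the shutdown and error paths: request a halt via
 * AXI_HALTREQ_REG, poll AXI_HALTACK_REG/AXI_IDLE_REG for up to
 * HALT_ACK_TIMEOUT_MS, then clear the request (the port remains halted until
 * the next reset).
 */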
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned long timeout;
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
	for (;;) {
		ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
		if (ret || val || time_after(jiffies, timeout))
			break;

		msleep(1);
	}

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
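
/*
 * MPSS metadata authentication: the modem.mdt header is copied into a
 * physically contiguous DMA buffer, ownership is (optionally) passed to the
 * modem VM, its address is written to RMB_PMI_META_DATA_REG and
 * RMB_CMD_META_DATA_READY is issued; the MBA then reports
 * RMB_MBA_META_DATA_AUTH_SUCCESS before the buffer is reclaimed and freed.
 */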
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	int mdata_perm;
	int xferop_ret;
	void *ptr;
	int ret;

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, fw->data, fw->size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
				      true, phys, fw->size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
					     false, phys, fw->size);
	if (xferop_ret)
		dev_err(qproc->dev,
			"mdt buffer not reclaimed, system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);

	return ret < 0 ? ret : 0;
}
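
/*
 * Only PT_LOAD segments that are not hash segments are loaded; the segment
 * payloads come from the split "modem.bNN" firmware files and are copied
 * into the MPSS region at their (possibly relocated) load addresses.
 */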
static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
{
	if (phdr->p_type != PT_LOAD)
		return false;

	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
		return false;

	if (!phdr->p_memsz)
		return false;

	return true;
}
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct firmware *seg_fw;
	const struct firmware *fw;
	struct elf32_hdr *ehdr;
	phys_addr_t mpss_reloc;
	phys_addr_t boot_addr;
	phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX;
	phys_addr_t max_addr = 0;
	bool relocate = false;
	char seg_name[10];
	ssize_t offset;
	size_t size = 0;
	void *ptr;
	int ret;
	int i;

	ret = request_firmware(&fw, "modem.mdt", qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load modem.mdt\n");
		return ret;
	}

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;

	/* Load firmware segments */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		offset = phdr->p_paddr - mpss_reloc;
		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
			dev_err(qproc->dev, "segment outside memory range\n");
			ret = -EINVAL;
			goto release_firmware;
		}

		ptr = qproc->mpss_region + offset;

		if (phdr->p_filesz) {
			snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i);
			ret = request_firmware(&seg_fw, seg_name, qproc->dev);
			if (ret) {
				dev_err(qproc->dev, "failed to load %s\n", seg_name);
				goto release_firmware;
			}

			memcpy(ptr, seg_fw->data, seg_fw->size);

			release_firmware(seg_fw);
		}

		if (phdr->p_memsz > phdr->p_filesz) {
			memset(ptr + phdr->p_filesz, 0,
			       phdr->p_memsz - phdr->p_filesz);
		}
		size += phdr->p_memsz;
	}

	/* Transfer ownership of modem ddr region to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	boot_addr = relocate ? qproc->mpss_phys : min_addr;
	writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
	writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
	writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

release_firmware:
	release_firmware(fw);

	return ret < 0 ? ret : 0;
}
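
/*
 * q6v5_start() sequence: enable proxy supplies and clocks, enable the active
 * supplies, deassert the MSS reset, enable the active clocks, hand the MBA
 * image to the hardware and reset the core, load and authenticate the MPSS
 * firmware, then wait for the handover interrupt before dropping the proxy
 * votes again.
 */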
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		return ret;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = reset_control_deassert(qproc->mss_restart);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_vdd;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Assign MBA image access in DDR to q6 */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n",
			xfermemop_ret);
		goto disable_active_clks;
	}

	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	ret = wait_for_completion_timeout(&qproc->start_done,
					  msecs_to_jiffies(5000));
	if (ret == 0) {
		dev_err(qproc->dev, "start timed out\n");
		ret = -ETIMEDOUT;
		goto reclaim_mpss;
	}

	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	qproc->running = true;

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);

	return 0;

reclaim_mpss:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, qproc->mpss_phys,
						qproc->mpss_size);
	WARN_ON(xfermemop_ret);

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);

assert_reset:
	reset_control_assert(qproc->mss_restart);

disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);

disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);

	return ret;
}
static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;
	u32 val;

	qproc->running = false;

	qcom_smem_state_update_bits(qproc->state,
				    BIT(qproc->stop_bit), BIT(qproc->stop_bit));

	ret = wait_for_completion_timeout(&qproc->stop_done,
					  msecs_to_jiffies(5000));
	if (ret == 0)
		dev_err(qproc->dev, "timed out on wait\n");

	qcom_smem_state_update_bits(qproc->state, BIT(qproc->stop_bit), 0);

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false,
				      qproc->mpss_phys, qproc->mpss_size);
	WARN_ON(ret);

	reset_control_assert(qproc->mss_restart);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

	return 0;
}
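
/*
 * Device addresses are translated relative to mpss_reloc, so the remoteproc
 * core can turn modem device addresses into kernel pointers within the
 * write-combined MPSS mapping.
 */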
static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
{
	struct q6v5 *qproc = rproc->priv;
	int offset;

	offset = da - qproc->mpss_reloc;
	if (offset < 0 || offset + len > qproc->mpss_size)
		return NULL;

	return qproc->mpss_region + offset;
}
static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
};
static irqreturn_t q6v5_wdog_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;
	size_t len;
	char *msg;

	/* Sometimes the stop triggers a watchdog rather than a stop-ack */
	if (!qproc->running) {
		complete(&qproc->stop_done);
		return IRQ_HANDLED;
	}

	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
	if (!IS_ERR(msg) && len > 0 && msg[0])
		dev_err(qproc->dev, "watchdog received: %s\n", msg);
	else
		dev_err(qproc->dev, "watchdog without message\n");

	rproc_report_crash(qproc->rproc, RPROC_WATCHDOG);

	return IRQ_HANDLED;
}
static irqreturn_t q6v5_fatal_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;
	size_t len;
	char *msg;

	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
	if (!IS_ERR(msg) && len > 0 && msg[0])
		dev_err(qproc->dev, "fatal error received: %s\n", msg);
	else
		dev_err(qproc->dev, "fatal error without message\n");

	rproc_report_crash(qproc->rproc, RPROC_FATAL_ERROR);

	return IRQ_HANDLED;
}
static irqreturn_t q6v5_handover_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;

	complete(&qproc->start_done);

	return IRQ_HANDLED;
}
static irqreturn_t q6v5_stop_ack_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;

	complete(&qproc->stop_done);

	return IRQ_HANDLED;
}
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	return 0;
}
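
/*
 * The "qcom,halt-regs" property is expected to carry a syscon phandle plus
 * three cells: the offsets of the Q6, modem and NC AXI halt register blocks
 * within that syscon (the exact offsets are SoC-specific).
 */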
static int q6v5_init_clocks(struct device *dev, struct clk **clks,
			    char **clk_names)
{
	int i;

	if (!clk_names)
		return 0;

	for (i = 0; clk_names[i]; i++) {
		clks[i] = devm_clk_get(dev, clk_names[i]);
		if (IS_ERR(clks[i])) {
			int rc = PTR_ERR(clks[i]);

			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s clock\n",
					clk_names[i]);
			return rc;
		}
	}

	return i;
}
static int q6v5_init_reset(struct q6v5 *qproc)
{
	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
							      NULL);
	if (IS_ERR(qproc->mss_restart)) {
		dev_err(qproc->dev, "failed to acquire mss restart\n");
		return PTR_ERR(qproc->mss_restart);
	}

	return 0;
}
static int q6v5_request_irq(struct q6v5 *qproc,
			    struct platform_device *pdev,
			    const char *name,
			    irq_handler_t thread_fn)
{
	int ret;

	ret = platform_get_irq_byname(pdev, name);
	if (ret < 0) {
		dev_err(&pdev->dev, "no %s IRQ defined\n", name);
		return ret;
	}

	ret = devm_request_threaded_irq(&pdev->dev, ret,
					NULL, thread_fn,
					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					"q6v5", qproc);
	if (ret)
		dev_err(&pdev->dev, "request %s IRQ failed\n", name);

	return ret;
}
static int q6v5_alloc_memory_region(struct q6v5 *qproc)
{
	struct device_node *child;
	struct device_node *node;
	struct resource r;
	int ret;

	child = of_get_child_by_name(qproc->dev->of_node, "mba");
	node = of_parse_phandle(child, "memory-region", 0);
	ret = of_address_to_resource(node, 0, &r);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mba region\n");
		return ret;
	}

	qproc->mba_phys = r.start;
	qproc->mba_size = resource_size(&r);
	qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
	if (!qproc->mba_region) {
		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
			&r.start, qproc->mba_size);
		return -EBUSY;
	}

	child = of_get_child_by_name(qproc->dev->of_node, "mpss");
	node = of_parse_phandle(child, "memory-region", 0);
	ret = of_address_to_resource(node, 0, &r);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mpss region\n");
		return ret;
	}

	qproc->mpss_phys = qproc->mpss_reloc = r.start;
	qproc->mpss_size = resource_size(&r);
	qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
	if (!qproc->mpss_region) {
		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
			&r.start, qproc->mpss_size);
		return -EBUSY;
	}

	return 0;
}
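
/*
 * The reserved DDR carveouts for the MBA and MPSS images are described by
 * "mba" and "mpss" child nodes, each pointing at a reserved-memory region
 * through a "memory-region" phandle; both are mapped write-combined here.
 */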
static int q6v5_probe(struct platform_device *pdev)
{
	const struct rproc_hexagon_res *desc;
	struct q6v5 *qproc;
	struct rproc *rproc;
	int ret;

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
			    desc->hexagon_mba_image, sizeof(*qproc));
	if (!rproc) {
		dev_err(&pdev->dev, "failed to allocate rproc\n");
		return -ENOMEM;
	}

	rproc->fw_ops = &q6v5_fw_ops;

	qproc = (struct q6v5 *)rproc->priv;
	qproc->dev = &pdev->dev;
	qproc->rproc = rproc;
	platform_set_drvdata(pdev, qproc);

	init_completion(&qproc->start_done);
	init_completion(&qproc->stop_done);

	ret = q6v5_init_mem(qproc, pdev);
	if (ret)
		goto free_rproc;

	ret = q6v5_alloc_memory_region(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
			       desc->proxy_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
		goto free_rproc;
	}
	qproc->proxy_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
			       desc->active_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active clocks.\n");
		goto free_rproc;
	}
	qproc->active_clk_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
				  desc->proxy_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
		goto free_rproc;
	}
	qproc->proxy_reg_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
				  desc->active_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active regulators.\n");
		goto free_rproc;
	}
	qproc->active_reg_count = ret;

	ret = q6v5_init_reset(qproc);
	if (ret)
		goto free_rproc;

	qproc->version = desc->version;
	qproc->need_mem_protection = desc->need_mem_protection;

	ret = q6v5_request_irq(qproc, pdev, "wdog", q6v5_wdog_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "fatal", q6v5_fatal_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "handover", q6v5_handover_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "stop-ack", q6v5_stop_ack_interrupt);
	if (ret < 0)
		goto free_rproc;

	qproc->state = qcom_smem_state_get(&pdev->dev, "stop", &qproc->stop_bit);
	if (IS_ERR(qproc->state)) {
		ret = PTR_ERR(qproc->state);
		goto free_rproc;
	}

	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");

	ret = rproc_add(rproc);
	if (ret)
		goto free_rproc;

	return 0;

free_rproc:
	rproc_free(rproc);

	return ret;
}
static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);

	rproc_del(qproc->rproc);

	qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
	qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
	rproc_free(qproc->rproc);

	return 0;
}
static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
			"xo",
			"pnoc",
			NULL
	},
	.active_clk_names = (char*[]){
			"iface",
			"bus",
			"mem",
			"gpll0_mss_clk",
			NULL
	},
	.need_mem_protection = true,
	.version = MSS_MSM8996,
};
static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.version = MSS_MSM8916,
};
static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.version = MSS_MSM8974,
};
static const struct of_device_id q6v5_of_match[] = {
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);
static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-pil",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Peripheral Image Loader for Hexagon");
MODULE_LICENSE("GPL v2");