// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm self-authenticating modem subsystem remoteproc driver
 *
 * Copyright (C) 2016 Linaro Ltd.
 * Copyright (C) 2014 Sony Mobile Communications AB
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel.h>
15 #include <linux/mfd/syscon.h>
16 #include <linux/module.h>
17 #include <linux/of_address.h>
18 #include <linux/of_device.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_domain.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/regmap.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/remoteproc.h>
25 #include "linux/remoteproc/qcom_q6v5_ipa_notify.h"
26 #include <linux/reset.h>
27 #include <linux/soc/qcom/mdt_loader.h>
28 #include <linux/iopoll.h>
30 #include "remoteproc_internal.h"
31 #include "qcom_common.h"
32 #include "qcom_q6v5.h"
34 #include <linux/qcom_scm.h>
36 #define MPSS_CRASH_REASON_SMEM 421
38 /* RMB Status Register Values */
39 #define RMB_PBL_SUCCESS 0x1
41 #define RMB_MBA_XPU_UNLOCKED 0x1
42 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2
43 #define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3
44 #define RMB_MBA_AUTH_COMPLETE 0x4
46 /* PBL/MBA interface registers */
47 #define RMB_MBA_IMAGE_REG 0x00
48 #define RMB_PBL_STATUS_REG 0x04
49 #define RMB_MBA_COMMAND_REG 0x08
50 #define RMB_MBA_STATUS_REG 0x0C
51 #define RMB_PMI_META_DATA_REG 0x10
52 #define RMB_PMI_CODE_START_REG 0x14
53 #define RMB_PMI_CODE_LENGTH_REG 0x18
54 #define RMB_MBA_MSS_STATUS 0x40
55 #define RMB_MBA_ALT_RESET 0x44
57 #define RMB_CMD_META_DATA_READY 0x1
58 #define RMB_CMD_LOAD_READY 0x2
60 /* QDSP6SS Register Offsets */
61 #define QDSP6SS_RESET_REG 0x014
62 #define QDSP6SS_GFMUX_CTL_REG 0x020
63 #define QDSP6SS_PWR_CTL_REG 0x030
64 #define QDSP6SS_MEM_PWR_CTL 0x0B0
65 #define QDSP6V6SS_MEM_PWR_CTL 0x034
66 #define QDSP6SS_STRAP_ACC 0x110
68 /* AXI Halt Register Offsets */
69 #define AXI_HALTREQ_REG 0x0
70 #define AXI_HALTACK_REG 0x4
71 #define AXI_IDLE_REG 0x8
72 #define NAV_AXI_HALTREQ_BIT BIT(0)
73 #define NAV_AXI_HALTACK_BIT BIT(1)
74 #define NAV_AXI_IDLE_BIT BIT(2)
75 #define AXI_GATING_VALID_OVERRIDE BIT(0)
77 #define HALT_ACK_TIMEOUT_US 100000
78 #define NAV_HALT_ACK_TIMEOUT_US 200
81 #define Q6SS_STOP_CORE BIT(0)
82 #define Q6SS_CORE_ARES BIT(1)
83 #define Q6SS_BUS_ARES_ENABLE BIT(2)
86 #define Q6SS_CBCR_CLKEN BIT(0)
87 #define Q6SS_CBCR_CLKOFF BIT(31)
88 #define Q6SS_CBCR_TIMEOUT_US 200
90 /* QDSP6SS_GFMUX_CTL */
91 #define Q6SS_CLK_ENABLE BIT(1)
94 #define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
95 #define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
96 #define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
97 #define Q6SS_L2TAG_SLP_NRET_N BIT(16)
98 #define Q6SS_ETB_SLP_NRET_N BIT(17)
99 #define Q6SS_L2DATA_STBY_N BIT(18)
100 #define Q6SS_SLP_RET_N BIT(19)
101 #define Q6SS_CLAMP_IO BIT(20)
102 #define QDSS_BHS_ON BIT(21)
103 #define QDSS_LDO_BYP BIT(22)
105 /* QDSP6v56 parameters */
106 #define QDSP6v56_LDO_BYP BIT(25)
107 #define QDSP6v56_BHS_ON BIT(24)
108 #define QDSP6v56_CLAMP_WL BIT(21)
109 #define QDSP6v56_CLAMP_QMC_MEM BIT(22)
110 #define QDSP6SS_XO_CBCR 0x0038
111 #define QDSP6SS_ACC_OVERRIDE_VAL 0x20
113 /* QDSP6v65 parameters */
114 #define QDSP6SS_CORE_CBCR 0x20
115 #define QDSP6SS_SLEEP 0x3C
116 #define QDSP6SS_BOOT_CORE_START 0x400
117 #define QDSP6SS_BOOT_CMD 0x404
118 #define QDSP6SS_BOOT_STATUS 0x408
119 #define BOOT_STATUS_TIMEOUT_US 200
120 #define BOOT_FSM_TIMEOUT 10000
123 struct regulator
*reg
;
128 struct qcom_mss_reg_res
{
134 struct rproc_hexagon_res
{
135 const char *hexagon_mba_image
;
136 struct qcom_mss_reg_res
*proxy_supply
;
137 struct qcom_mss_reg_res
*active_supply
;
138 char **proxy_clk_names
;
139 char **reset_clk_names
;
140 char **active_clk_names
;
141 char **active_pd_names
;
142 char **proxy_pd_names
;
144 bool need_mem_protection
;
153 void __iomem
*reg_base
;
154 void __iomem
*rmb_base
;
156 struct regmap
*halt_map
;
157 struct regmap
*halt_nav_map
;
158 struct regmap
*conn_map
;
166 struct reset_control
*mss_restart
;
167 struct reset_control
*pdc_reset
;
169 struct qcom_q6v5 q6v5
;
171 struct clk
*active_clks
[8];
172 struct clk
*reset_clks
[4];
173 struct clk
*proxy_clks
[4];
174 struct device
*active_pds
[1];
175 struct device
*proxy_pds
[3];
176 int active_clk_count
;
182 struct reg_info active_regs
[1];
183 struct reg_info proxy_regs
[3];
184 int active_reg_count
;
189 bool dump_mba_loaded
;
190 unsigned long dump_segment_mask
;
191 unsigned long dump_complete_mask
;
193 phys_addr_t mba_phys
;
197 phys_addr_t mpss_phys
;
198 phys_addr_t mpss_reloc
;
202 struct qcom_rproc_glink glink_subdev
;
203 struct qcom_rproc_subdev smd_subdev
;
204 struct qcom_rproc_ssr ssr_subdev
;
205 struct qcom_rproc_ipa_notify ipa_notify_subdev
;
206 struct qcom_sysmon
*sysmon
;
207 bool need_mem_protection
;
212 const char *hexagon_mdt_image
;
225 static int q6v5_regulator_init(struct device
*dev
, struct reg_info
*regs
,
226 const struct qcom_mss_reg_res
*reg_res
)
234 for (i
= 0; reg_res
[i
].supply
; i
++) {
235 regs
[i
].reg
= devm_regulator_get(dev
, reg_res
[i
].supply
);
236 if (IS_ERR(regs
[i
].reg
)) {
237 rc
= PTR_ERR(regs
[i
].reg
);
238 if (rc
!= -EPROBE_DEFER
)
239 dev_err(dev
, "Failed to get %s\n regulator",
244 regs
[i
].uV
= reg_res
[i
].uV
;
245 regs
[i
].uA
= reg_res
[i
].uA
;
251 static int q6v5_regulator_enable(struct q6v5
*qproc
,
252 struct reg_info
*regs
, int count
)
257 for (i
= 0; i
< count
; i
++) {
258 if (regs
[i
].uV
> 0) {
259 ret
= regulator_set_voltage(regs
[i
].reg
,
260 regs
[i
].uV
, INT_MAX
);
263 "Failed to request voltage for %d.\n",
269 if (regs
[i
].uA
> 0) {
270 ret
= regulator_set_load(regs
[i
].reg
,
274 "Failed to set regulator mode\n");
279 ret
= regulator_enable(regs
[i
].reg
);
281 dev_err(qproc
->dev
, "Regulator enable failed\n");
288 for (; i
>= 0; i
--) {
290 regulator_set_voltage(regs
[i
].reg
, 0, INT_MAX
);
293 regulator_set_load(regs
[i
].reg
, 0);
295 regulator_disable(regs
[i
].reg
);
301 static void q6v5_regulator_disable(struct q6v5
*qproc
,
302 struct reg_info
*regs
, int count
)
306 for (i
= 0; i
< count
; i
++) {
308 regulator_set_voltage(regs
[i
].reg
, 0, INT_MAX
);
311 regulator_set_load(regs
[i
].reg
, 0);
313 regulator_disable(regs
[i
].reg
);
/*
 * q6v5_clk_enable() - prepare+enable an array of clocks
 * @dev:   device used for error logging
 * @clks:  clock handles
 * @count: number of entries in @clks
 *
 * On failure, clocks enabled so far are disabled in reverse order.
 * Returns 0 or the clk_prepare_enable() errno.
 */
static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int rc;
	int i;

	for (i = 0; i < count; i++) {
		rc = clk_prepare_enable(clks[i]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	/* clks[i] failed and is not enabled; unwind the earlier ones only */
	for (i--; i >= 0; i--)
		clk_disable_unprepare(clks[i]);

	return rc;
}
/*
 * q6v5_clk_disable() - disable+unprepare an array of clocks
 * @dev:   unused; kept to mirror q6v5_clk_enable()
 * @clks:  clock handles previously enabled
 * @count: number of entries in @clks
 */
static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int i;

	for (i = 0; i < count; i++)
		clk_disable_unprepare(clks[i]);
}
348 static int q6v5_pds_enable(struct q6v5
*qproc
, struct device
**pds
,
354 for (i
= 0; i
< pd_count
; i
++) {
355 dev_pm_genpd_set_performance_state(pds
[i
], INT_MAX
);
356 ret
= pm_runtime_get_sync(pds
[i
]);
358 goto unroll_pd_votes
;
364 for (i
--; i
>= 0; i
--) {
365 dev_pm_genpd_set_performance_state(pds
[i
], 0);
366 pm_runtime_put(pds
[i
]);
372 static void q6v5_pds_disable(struct q6v5
*qproc
, struct device
**pds
,
377 for (i
= 0; i
< pd_count
; i
++) {
378 dev_pm_genpd_set_performance_state(pds
[i
], 0);
379 pm_runtime_put(pds
[i
]);
383 static int q6v5_xfer_mem_ownership(struct q6v5
*qproc
, int *current_perm
,
384 bool local
, bool remote
, phys_addr_t addr
,
387 struct qcom_scm_vmperm next
[2];
390 if (!qproc
->need_mem_protection
)
393 if (local
== !!(*current_perm
& BIT(QCOM_SCM_VMID_HLOS
)) &&
394 remote
== !!(*current_perm
& BIT(QCOM_SCM_VMID_MSS_MSA
)))
398 next
[perms
].vmid
= QCOM_SCM_VMID_HLOS
;
399 next
[perms
].perm
= QCOM_SCM_PERM_RWX
;
404 next
[perms
].vmid
= QCOM_SCM_VMID_MSS_MSA
;
405 next
[perms
].perm
= QCOM_SCM_PERM_RW
;
409 return qcom_scm_assign_mem(addr
, ALIGN(size
, SZ_4K
),
410 current_perm
, next
, perms
);
413 static int q6v5_load(struct rproc
*rproc
, const struct firmware
*fw
)
415 struct q6v5
*qproc
= rproc
->priv
;
417 memcpy(qproc
->mba_region
, fw
->data
, fw
->size
);
422 static int q6v5_reset_assert(struct q6v5
*qproc
)
426 if (qproc
->has_alt_reset
) {
427 reset_control_assert(qproc
->pdc_reset
);
428 ret
= reset_control_reset(qproc
->mss_restart
);
429 reset_control_deassert(qproc
->pdc_reset
);
430 } else if (qproc
->has_halt_nav
) {
432 * When the AXI pipeline is being reset with the Q6 modem partly
433 * operational there is possibility of AXI valid signal to
434 * glitch, leading to spurious transactions and Q6 hangs. A work
435 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE
436 * BIT before triggering Q6 MSS reset. Both the HALTREQ and
437 * AXI_GATING_VALID_OVERRIDE are withdrawn post MSS assert
438 * followed by a MSS deassert, while holding the PDC reset.
440 reset_control_assert(qproc
->pdc_reset
);
441 regmap_update_bits(qproc
->conn_map
, qproc
->conn_box
,
442 AXI_GATING_VALID_OVERRIDE
, 1);
443 regmap_update_bits(qproc
->halt_nav_map
, qproc
->halt_nav
,
444 NAV_AXI_HALTREQ_BIT
, 0);
445 reset_control_assert(qproc
->mss_restart
);
446 reset_control_deassert(qproc
->pdc_reset
);
447 regmap_update_bits(qproc
->conn_map
, qproc
->conn_box
,
448 AXI_GATING_VALID_OVERRIDE
, 0);
449 ret
= reset_control_deassert(qproc
->mss_restart
);
451 ret
= reset_control_assert(qproc
->mss_restart
);
457 static int q6v5_reset_deassert(struct q6v5
*qproc
)
461 if (qproc
->has_alt_reset
) {
462 reset_control_assert(qproc
->pdc_reset
);
463 writel(1, qproc
->rmb_base
+ RMB_MBA_ALT_RESET
);
464 ret
= reset_control_reset(qproc
->mss_restart
);
465 writel(0, qproc
->rmb_base
+ RMB_MBA_ALT_RESET
);
466 reset_control_deassert(qproc
->pdc_reset
);
467 } else if (qproc
->has_halt_nav
) {
468 ret
= reset_control_reset(qproc
->mss_restart
);
470 ret
= reset_control_deassert(qproc
->mss_restart
);
476 static int q6v5_rmb_pbl_wait(struct q6v5
*qproc
, int ms
)
478 unsigned long timeout
;
481 timeout
= jiffies
+ msecs_to_jiffies(ms
);
483 val
= readl(qproc
->rmb_base
+ RMB_PBL_STATUS_REG
);
487 if (time_after(jiffies
, timeout
))
496 static int q6v5_rmb_mba_wait(struct q6v5
*qproc
, u32 status
, int ms
)
499 unsigned long timeout
;
502 timeout
= jiffies
+ msecs_to_jiffies(ms
);
504 val
= readl(qproc
->rmb_base
+ RMB_MBA_STATUS_REG
);
510 else if (status
&& val
== status
)
513 if (time_after(jiffies
, timeout
))
522 static int q6v5proc_reset(struct q6v5
*qproc
)
528 if (qproc
->version
== MSS_SDM845
) {
529 val
= readl(qproc
->reg_base
+ QDSP6SS_SLEEP
);
530 val
|= Q6SS_CBCR_CLKEN
;
531 writel(val
, qproc
->reg_base
+ QDSP6SS_SLEEP
);
533 ret
= readl_poll_timeout(qproc
->reg_base
+ QDSP6SS_SLEEP
,
534 val
, !(val
& Q6SS_CBCR_CLKOFF
), 1,
535 Q6SS_CBCR_TIMEOUT_US
);
537 dev_err(qproc
->dev
, "QDSP6SS Sleep clock timed out\n");
541 /* De-assert QDSP6 stop core */
542 writel(1, qproc
->reg_base
+ QDSP6SS_BOOT_CORE_START
);
543 /* Trigger boot FSM */
544 writel(1, qproc
->reg_base
+ QDSP6SS_BOOT_CMD
);
546 ret
= readl_poll_timeout(qproc
->rmb_base
+ RMB_MBA_MSS_STATUS
,
547 val
, (val
& BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT
);
549 dev_err(qproc
->dev
, "Boot FSM failed to complete.\n");
550 /* Reset the modem so that boot FSM is in reset state */
551 q6v5_reset_deassert(qproc
);
556 } else if (qproc
->version
== MSS_SC7180
) {
557 val
= readl(qproc
->reg_base
+ QDSP6SS_SLEEP
);
558 val
|= Q6SS_CBCR_CLKEN
;
559 writel(val
, qproc
->reg_base
+ QDSP6SS_SLEEP
);
561 ret
= readl_poll_timeout(qproc
->reg_base
+ QDSP6SS_SLEEP
,
562 val
, !(val
& Q6SS_CBCR_CLKOFF
), 1,
563 Q6SS_CBCR_TIMEOUT_US
);
565 dev_err(qproc
->dev
, "QDSP6SS Sleep clock timed out\n");
569 /* Turn on the XO clock needed for PLL setup */
570 val
= readl(qproc
->reg_base
+ QDSP6SS_XO_CBCR
);
571 val
|= Q6SS_CBCR_CLKEN
;
572 writel(val
, qproc
->reg_base
+ QDSP6SS_XO_CBCR
);
574 ret
= readl_poll_timeout(qproc
->reg_base
+ QDSP6SS_XO_CBCR
,
575 val
, !(val
& Q6SS_CBCR_CLKOFF
), 1,
576 Q6SS_CBCR_TIMEOUT_US
);
578 dev_err(qproc
->dev
, "QDSP6SS XO clock timed out\n");
582 /* Configure Q6 core CBCR to auto-enable after reset sequence */
583 val
= readl(qproc
->reg_base
+ QDSP6SS_CORE_CBCR
);
584 val
|= Q6SS_CBCR_CLKEN
;
585 writel(val
, qproc
->reg_base
+ QDSP6SS_CORE_CBCR
);
587 /* De-assert the Q6 stop core signal */
588 writel(1, qproc
->reg_base
+ QDSP6SS_BOOT_CORE_START
);
590 /* Trigger the boot FSM to start the Q6 out-of-reset sequence */
591 writel(1, qproc
->reg_base
+ QDSP6SS_BOOT_CMD
);
593 /* Poll the QDSP6SS_BOOT_STATUS for FSM completion */
594 ret
= readl_poll_timeout(qproc
->reg_base
+ QDSP6SS_BOOT_STATUS
,
595 val
, (val
& BIT(0)) != 0, 1,
596 BOOT_STATUS_TIMEOUT_US
);
598 dev_err(qproc
->dev
, "Boot FSM failed to complete.\n");
599 /* Reset the modem so that boot FSM is in reset state */
600 q6v5_reset_deassert(qproc
);
604 } else if (qproc
->version
== MSS_MSM8996
||
605 qproc
->version
== MSS_MSM8998
) {
608 /* Override the ACC value if required */
609 writel(QDSP6SS_ACC_OVERRIDE_VAL
,
610 qproc
->reg_base
+ QDSP6SS_STRAP_ACC
);
612 /* Assert resets, stop core */
613 val
= readl(qproc
->reg_base
+ QDSP6SS_RESET_REG
);
614 val
|= Q6SS_CORE_ARES
| Q6SS_BUS_ARES_ENABLE
| Q6SS_STOP_CORE
;
615 writel(val
, qproc
->reg_base
+ QDSP6SS_RESET_REG
);
617 /* BHS require xo cbcr to be enabled */
618 val
= readl(qproc
->reg_base
+ QDSP6SS_XO_CBCR
);
619 val
|= Q6SS_CBCR_CLKEN
;
620 writel(val
, qproc
->reg_base
+ QDSP6SS_XO_CBCR
);
622 /* Read CLKOFF bit to go low indicating CLK is enabled */
623 ret
= readl_poll_timeout(qproc
->reg_base
+ QDSP6SS_XO_CBCR
,
624 val
, !(val
& Q6SS_CBCR_CLKOFF
), 1,
625 Q6SS_CBCR_TIMEOUT_US
);
628 "xo cbcr enabling timed out (rc:%d)\n", ret
);
631 /* Enable power block headswitch and wait for it to stabilize */
632 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
633 val
|= QDSP6v56_BHS_ON
;
634 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
635 val
|= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
638 /* Put LDO in bypass mode */
639 val
|= QDSP6v56_LDO_BYP
;
640 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
642 /* Deassert QDSP6 compiler memory clamp */
643 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
644 val
&= ~QDSP6v56_CLAMP_QMC_MEM
;
645 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
647 /* Deassert memory peripheral sleep and L2 memory standby */
648 val
|= Q6SS_L2DATA_STBY_N
| Q6SS_SLP_RET_N
;
649 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
651 /* Turn on L1, L2, ETB and JU memories 1 at a time */
652 if (qproc
->version
== MSS_MSM8996
) {
653 mem_pwr_ctl
= QDSP6SS_MEM_PWR_CTL
;
657 mem_pwr_ctl
= QDSP6V6SS_MEM_PWR_CTL
;
660 val
= readl(qproc
->reg_base
+ mem_pwr_ctl
);
661 for (; i
>= 0; i
--) {
663 writel(val
, qproc
->reg_base
+ mem_pwr_ctl
);
665 * Read back value to ensure the write is done then
666 * wait for 1us for both memory peripheral and data
669 val
|= readl(qproc
->reg_base
+ mem_pwr_ctl
);
672 /* Remove word line clamp */
673 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
674 val
&= ~QDSP6v56_CLAMP_WL
;
675 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
677 /* Assert resets, stop core */
678 val
= readl(qproc
->reg_base
+ QDSP6SS_RESET_REG
);
679 val
|= Q6SS_CORE_ARES
| Q6SS_BUS_ARES_ENABLE
| Q6SS_STOP_CORE
;
680 writel(val
, qproc
->reg_base
+ QDSP6SS_RESET_REG
);
682 /* Enable power block headswitch and wait for it to stabilize */
683 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
684 val
|= QDSS_BHS_ON
| QDSS_LDO_BYP
;
685 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
686 val
|= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
689 * Turn on memories. L2 banks should be done individually
690 * to minimize inrush current.
692 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
693 val
|= Q6SS_SLP_RET_N
| Q6SS_L2TAG_SLP_NRET_N
|
694 Q6SS_ETB_SLP_NRET_N
| Q6SS_L2DATA_STBY_N
;
695 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
696 val
|= Q6SS_L2DATA_SLP_NRET_N_2
;
697 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
698 val
|= Q6SS_L2DATA_SLP_NRET_N_1
;
699 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
700 val
|= Q6SS_L2DATA_SLP_NRET_N_0
;
701 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
703 /* Remove IO clamp */
704 val
&= ~Q6SS_CLAMP_IO
;
705 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
707 /* Bring core out of reset */
708 val
= readl(qproc
->reg_base
+ QDSP6SS_RESET_REG
);
709 val
&= ~Q6SS_CORE_ARES
;
710 writel(val
, qproc
->reg_base
+ QDSP6SS_RESET_REG
);
712 /* Turn on core clock */
713 val
= readl(qproc
->reg_base
+ QDSP6SS_GFMUX_CTL_REG
);
714 val
|= Q6SS_CLK_ENABLE
;
715 writel(val
, qproc
->reg_base
+ QDSP6SS_GFMUX_CTL_REG
);
717 /* Start core execution */
718 val
= readl(qproc
->reg_base
+ QDSP6SS_RESET_REG
);
719 val
&= ~Q6SS_STOP_CORE
;
720 writel(val
, qproc
->reg_base
+ QDSP6SS_RESET_REG
);
723 /* Wait for PBL status */
724 ret
= q6v5_rmb_pbl_wait(qproc
, 1000);
725 if (ret
== -ETIMEDOUT
) {
726 dev_err(qproc
->dev
, "PBL boot timed out\n");
727 } else if (ret
!= RMB_PBL_SUCCESS
) {
728 dev_err(qproc
->dev
, "PBL returned unexpected status %d\n", ret
);
737 static void q6v5proc_halt_axi_port(struct q6v5
*qproc
,
738 struct regmap
*halt_map
,
744 /* Check if we're already idle */
745 ret
= regmap_read(halt_map
, offset
+ AXI_IDLE_REG
, &val
);
749 /* Assert halt request */
750 regmap_write(halt_map
, offset
+ AXI_HALTREQ_REG
, 1);
753 regmap_read_poll_timeout(halt_map
, offset
+ AXI_HALTACK_REG
, val
,
754 val
, 1000, HALT_ACK_TIMEOUT_US
);
756 ret
= regmap_read(halt_map
, offset
+ AXI_IDLE_REG
, &val
);
758 dev_err(qproc
->dev
, "port failed halt\n");
760 /* Clear halt request (port will remain halted until reset) */
761 regmap_write(halt_map
, offset
+ AXI_HALTREQ_REG
, 0);
764 static void q6v5proc_halt_nav_axi_port(struct q6v5
*qproc
,
765 struct regmap
*halt_map
,
771 /* Check if we're already idle */
772 ret
= regmap_read(halt_map
, offset
, &val
);
773 if (!ret
&& (val
& NAV_AXI_IDLE_BIT
))
776 /* Assert halt request */
777 regmap_update_bits(halt_map
, offset
, NAV_AXI_HALTREQ_BIT
,
778 NAV_AXI_HALTREQ_BIT
);
780 /* Wait for halt ack*/
781 regmap_read_poll_timeout(halt_map
, offset
, val
,
782 (val
& NAV_AXI_HALTACK_BIT
),
783 5, NAV_HALT_ACK_TIMEOUT_US
);
785 ret
= regmap_read(halt_map
, offset
, &val
);
786 if (ret
|| !(val
& NAV_AXI_IDLE_BIT
))
787 dev_err(qproc
->dev
, "port failed halt\n");
790 static int q6v5_mpss_init_image(struct q6v5
*qproc
, const struct firmware
*fw
)
792 unsigned long dma_attrs
= DMA_ATTR_FORCE_CONTIGUOUS
;
801 metadata
= qcom_mdt_read_metadata(fw
, &size
);
802 if (IS_ERR(metadata
))
803 return PTR_ERR(metadata
);
805 ptr
= dma_alloc_attrs(qproc
->dev
, size
, &phys
, GFP_KERNEL
, dma_attrs
);
808 dev_err(qproc
->dev
, "failed to allocate mdt buffer\n");
812 memcpy(ptr
, metadata
, size
);
814 /* Hypervisor mapping to access metadata by modem */
815 mdata_perm
= BIT(QCOM_SCM_VMID_HLOS
);
816 ret
= q6v5_xfer_mem_ownership(qproc
, &mdata_perm
, false, true,
820 "assigning Q6 access to metadata failed: %d\n", ret
);
825 writel(phys
, qproc
->rmb_base
+ RMB_PMI_META_DATA_REG
);
826 writel(RMB_CMD_META_DATA_READY
, qproc
->rmb_base
+ RMB_MBA_COMMAND_REG
);
828 ret
= q6v5_rmb_mba_wait(qproc
, RMB_MBA_META_DATA_AUTH_SUCCESS
, 1000);
829 if (ret
== -ETIMEDOUT
)
830 dev_err(qproc
->dev
, "MPSS header authentication timed out\n");
832 dev_err(qproc
->dev
, "MPSS header authentication failed: %d\n", ret
);
834 /* Metadata authentication done, remove modem access */
835 xferop_ret
= q6v5_xfer_mem_ownership(qproc
, &mdata_perm
, true, false,
839 "mdt buffer not reclaimed system may become unstable\n");
842 dma_free_attrs(qproc
->dev
, size
, ptr
, phys
, dma_attrs
);
845 return ret
< 0 ? ret
: 0;
848 static bool q6v5_phdr_valid(const struct elf32_phdr
*phdr
)
850 if (phdr
->p_type
!= PT_LOAD
)
853 if ((phdr
->p_flags
& QCOM_MDT_TYPE_MASK
) == QCOM_MDT_TYPE_HASH
)
862 static int q6v5_mba_load(struct q6v5
*qproc
)
867 qcom_q6v5_prepare(&qproc
->q6v5
);
869 ret
= q6v5_pds_enable(qproc
, qproc
->active_pds
, qproc
->active_pd_count
);
871 dev_err(qproc
->dev
, "failed to enable active power domains\n");
875 ret
= q6v5_pds_enable(qproc
, qproc
->proxy_pds
, qproc
->proxy_pd_count
);
877 dev_err(qproc
->dev
, "failed to enable proxy power domains\n");
878 goto disable_active_pds
;
881 ret
= q6v5_regulator_enable(qproc
, qproc
->proxy_regs
,
882 qproc
->proxy_reg_count
);
884 dev_err(qproc
->dev
, "failed to enable proxy supplies\n");
885 goto disable_proxy_pds
;
888 ret
= q6v5_clk_enable(qproc
->dev
, qproc
->proxy_clks
,
889 qproc
->proxy_clk_count
);
891 dev_err(qproc
->dev
, "failed to enable proxy clocks\n");
892 goto disable_proxy_reg
;
895 ret
= q6v5_regulator_enable(qproc
, qproc
->active_regs
,
896 qproc
->active_reg_count
);
898 dev_err(qproc
->dev
, "failed to enable supplies\n");
899 goto disable_proxy_clk
;
902 ret
= q6v5_clk_enable(qproc
->dev
, qproc
->reset_clks
,
903 qproc
->reset_clk_count
);
905 dev_err(qproc
->dev
, "failed to enable reset clocks\n");
909 ret
= q6v5_reset_deassert(qproc
);
911 dev_err(qproc
->dev
, "failed to deassert mss restart\n");
912 goto disable_reset_clks
;
915 ret
= q6v5_clk_enable(qproc
->dev
, qproc
->active_clks
,
916 qproc
->active_clk_count
);
918 dev_err(qproc
->dev
, "failed to enable clocks\n");
922 /* Assign MBA image access in DDR to q6 */
923 ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mba_perm
, false, true,
924 qproc
->mba_phys
, qproc
->mba_size
);
927 "assigning Q6 access to mba memory failed: %d\n", ret
);
928 goto disable_active_clks
;
931 writel(qproc
->mba_phys
, qproc
->rmb_base
+ RMB_MBA_IMAGE_REG
);
933 ret
= q6v5proc_reset(qproc
);
937 ret
= q6v5_rmb_mba_wait(qproc
, 0, 5000);
938 if (ret
== -ETIMEDOUT
) {
939 dev_err(qproc
->dev
, "MBA boot timed out\n");
941 } else if (ret
!= RMB_MBA_XPU_UNLOCKED
&&
942 ret
!= RMB_MBA_XPU_UNLOCKED_SCRIBBLED
) {
943 dev_err(qproc
->dev
, "MBA returned unexpected status %d\n", ret
);
948 qproc
->dump_mba_loaded
= true;
952 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_q6
);
953 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_modem
);
954 if (qproc
->has_halt_nav
)
955 q6v5proc_halt_nav_axi_port(qproc
, qproc
->halt_nav_map
,
957 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_nc
);
960 xfermemop_ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mba_perm
, true,
961 false, qproc
->mba_phys
,
965 "Failed to reclaim mba buffer, system may become unstable\n");
969 q6v5_clk_disable(qproc
->dev
, qproc
->active_clks
,
970 qproc
->active_clk_count
);
972 q6v5_reset_assert(qproc
);
974 q6v5_clk_disable(qproc
->dev
, qproc
->reset_clks
,
975 qproc
->reset_clk_count
);
977 q6v5_regulator_disable(qproc
, qproc
->active_regs
,
978 qproc
->active_reg_count
);
980 q6v5_clk_disable(qproc
->dev
, qproc
->proxy_clks
,
981 qproc
->proxy_clk_count
);
983 q6v5_regulator_disable(qproc
, qproc
->proxy_regs
,
984 qproc
->proxy_reg_count
);
986 q6v5_pds_disable(qproc
, qproc
->proxy_pds
, qproc
->proxy_pd_count
);
988 q6v5_pds_disable(qproc
, qproc
->active_pds
, qproc
->active_pd_count
);
990 qcom_q6v5_unprepare(&qproc
->q6v5
);
995 static void q6v5_mba_reclaim(struct q6v5
*qproc
)
1000 qproc
->dump_mba_loaded
= false;
1002 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_q6
);
1003 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_modem
);
1004 if (qproc
->has_halt_nav
)
1005 q6v5proc_halt_nav_axi_port(qproc
, qproc
->halt_nav_map
,
1007 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_nc
);
1008 if (qproc
->version
== MSS_MSM8996
) {
1010 * To avoid high MX current during LPASS/MSS restart.
1012 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
1013 val
|= Q6SS_CLAMP_IO
| QDSP6v56_CLAMP_WL
|
1014 QDSP6v56_CLAMP_QMC_MEM
;
1015 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
1018 q6v5_reset_assert(qproc
);
1020 q6v5_clk_disable(qproc
->dev
, qproc
->reset_clks
,
1021 qproc
->reset_clk_count
);
1022 q6v5_clk_disable(qproc
->dev
, qproc
->active_clks
,
1023 qproc
->active_clk_count
);
1024 q6v5_regulator_disable(qproc
, qproc
->active_regs
,
1025 qproc
->active_reg_count
);
1026 q6v5_pds_disable(qproc
, qproc
->active_pds
, qproc
->active_pd_count
);
1028 /* In case of failure or coredump scenario where reclaiming MBA memory
1029 * could not happen reclaim it here.
1031 ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mba_perm
, true, false,
1036 ret
= qcom_q6v5_unprepare(&qproc
->q6v5
);
1038 q6v5_pds_disable(qproc
, qproc
->proxy_pds
,
1039 qproc
->proxy_pd_count
);
1040 q6v5_clk_disable(qproc
->dev
, qproc
->proxy_clks
,
1041 qproc
->proxy_clk_count
);
1042 q6v5_regulator_disable(qproc
, qproc
->proxy_regs
,
1043 qproc
->proxy_reg_count
);
1047 static int q6v5_reload_mba(struct rproc
*rproc
)
1049 struct q6v5
*qproc
= rproc
->priv
;
1050 const struct firmware
*fw
;
1053 ret
= request_firmware(&fw
, rproc
->firmware
, qproc
->dev
);
1057 q6v5_load(rproc
, fw
);
1058 ret
= q6v5_mba_load(qproc
);
1059 release_firmware(fw
);
1064 static int q6v5_mpss_load(struct q6v5
*qproc
)
1066 const struct elf32_phdr
*phdrs
;
1067 const struct elf32_phdr
*phdr
;
1068 const struct firmware
*seg_fw
;
1069 const struct firmware
*fw
;
1070 struct elf32_hdr
*ehdr
;
1071 phys_addr_t mpss_reloc
;
1072 phys_addr_t boot_addr
;
1073 phys_addr_t min_addr
= PHYS_ADDR_MAX
;
1074 phys_addr_t max_addr
= 0;
1076 bool relocate
= false;
1085 fw_name_len
= strlen(qproc
->hexagon_mdt_image
);
1086 if (fw_name_len
<= 4)
1089 fw_name
= kstrdup(qproc
->hexagon_mdt_image
, GFP_KERNEL
);
1093 ret
= request_firmware(&fw
, fw_name
, qproc
->dev
);
1095 dev_err(qproc
->dev
, "unable to load %s\n", fw_name
);
1099 /* Initialize the RMB validator */
1100 writel(0, qproc
->rmb_base
+ RMB_PMI_CODE_LENGTH_REG
);
1102 ret
= q6v5_mpss_init_image(qproc
, fw
);
1104 goto release_firmware
;
1106 ehdr
= (struct elf32_hdr
*)fw
->data
;
1107 phdrs
= (struct elf32_phdr
*)(ehdr
+ 1);
1109 for (i
= 0; i
< ehdr
->e_phnum
; i
++) {
1112 if (!q6v5_phdr_valid(phdr
))
1115 if (phdr
->p_flags
& QCOM_MDT_RELOCATABLE
)
1118 if (phdr
->p_paddr
< min_addr
)
1119 min_addr
= phdr
->p_paddr
;
1121 if (phdr
->p_paddr
+ phdr
->p_memsz
> max_addr
)
1122 max_addr
= ALIGN(phdr
->p_paddr
+ phdr
->p_memsz
, SZ_4K
);
1126 * In case of a modem subsystem restart on secure devices, the modem
1127 * memory can be reclaimed only after MBA is loaded. For modem cold
1128 * boot this will be a nop
1130 q6v5_xfer_mem_ownership(qproc
, &qproc
->mpss_perm
, true, false,
1131 qproc
->mpss_phys
, qproc
->mpss_size
);
1133 /* Share ownership between Linux and MSS, during segment loading */
1134 ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mpss_perm
, true, true,
1135 qproc
->mpss_phys
, qproc
->mpss_size
);
1138 "assigning Q6 access to mpss memory failed: %d\n", ret
);
1140 goto release_firmware
;
1143 mpss_reloc
= relocate
? min_addr
: qproc
->mpss_phys
;
1144 qproc
->mpss_reloc
= mpss_reloc
;
1145 /* Load firmware segments */
1146 for (i
= 0; i
< ehdr
->e_phnum
; i
++) {
1149 if (!q6v5_phdr_valid(phdr
))
1152 offset
= phdr
->p_paddr
- mpss_reloc
;
1153 if (offset
< 0 || offset
+ phdr
->p_memsz
> qproc
->mpss_size
) {
1154 dev_err(qproc
->dev
, "segment outside memory range\n");
1156 goto release_firmware
;
1159 ptr
= qproc
->mpss_region
+ offset
;
1161 if (phdr
->p_filesz
&& phdr
->p_offset
< fw
->size
) {
1162 /* Firmware is large enough to be non-split */
1163 if (phdr
->p_offset
+ phdr
->p_filesz
> fw
->size
) {
1165 "failed to load segment %d from truncated file %s\n",
1168 goto release_firmware
;
1171 memcpy(ptr
, fw
->data
+ phdr
->p_offset
, phdr
->p_filesz
);
1172 } else if (phdr
->p_filesz
) {
1173 /* Replace "xxx.xxx" with "xxx.bxx" */
1174 sprintf(fw_name
+ fw_name_len
- 3, "b%02d", i
);
1175 ret
= request_firmware(&seg_fw
, fw_name
, qproc
->dev
);
1177 dev_err(qproc
->dev
, "failed to load %s\n", fw_name
);
1178 goto release_firmware
;
1181 memcpy(ptr
, seg_fw
->data
, seg_fw
->size
);
1183 release_firmware(seg_fw
);
1186 if (phdr
->p_memsz
> phdr
->p_filesz
) {
1187 memset(ptr
+ phdr
->p_filesz
, 0,
1188 phdr
->p_memsz
- phdr
->p_filesz
);
1190 size
+= phdr
->p_memsz
;
1192 code_length
= readl(qproc
->rmb_base
+ RMB_PMI_CODE_LENGTH_REG
);
1194 boot_addr
= relocate
? qproc
->mpss_phys
: min_addr
;
1195 writel(boot_addr
, qproc
->rmb_base
+ RMB_PMI_CODE_START_REG
);
1196 writel(RMB_CMD_LOAD_READY
, qproc
->rmb_base
+ RMB_MBA_COMMAND_REG
);
1198 writel(size
, qproc
->rmb_base
+ RMB_PMI_CODE_LENGTH_REG
);
1200 ret
= readl(qproc
->rmb_base
+ RMB_MBA_STATUS_REG
);
1202 dev_err(qproc
->dev
, "MPSS authentication failed: %d\n",
1204 goto release_firmware
;
1208 /* Transfer ownership of modem ddr region to q6 */
1209 ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mpss_perm
, false, true,
1210 qproc
->mpss_phys
, qproc
->mpss_size
);
1213 "assigning Q6 access to mpss memory failed: %d\n", ret
);
1215 goto release_firmware
;
1218 ret
= q6v5_rmb_mba_wait(qproc
, RMB_MBA_AUTH_COMPLETE
, 10000);
1219 if (ret
== -ETIMEDOUT
)
1220 dev_err(qproc
->dev
, "MPSS authentication timed out\n");
1222 dev_err(qproc
->dev
, "MPSS authentication failed: %d\n", ret
);
1225 release_firmware(fw
);
1229 return ret
< 0 ? ret
: 0;
1232 static void qcom_q6v5_dump_segment(struct rproc
*rproc
,
1233 struct rproc_dump_segment
*segment
,
1237 struct q6v5
*qproc
= rproc
->priv
;
1238 unsigned long mask
= BIT((unsigned long)segment
->priv
);
1239 void *ptr
= rproc_da_to_va(rproc
, segment
->da
, segment
->size
);
1241 /* Unlock mba before copying segments */
1242 if (!qproc
->dump_mba_loaded
) {
1243 ret
= q6v5_reload_mba(rproc
);
1245 /* Reset ownership back to Linux to copy segments */
1246 ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mpss_perm
,
1254 memset(dest
, 0xff, segment
->size
);
1256 memcpy(dest
, ptr
, segment
->size
);
1258 qproc
->dump_segment_mask
|= mask
;
1260 /* Reclaim mba after copying segments */
1261 if (qproc
->dump_segment_mask
== qproc
->dump_complete_mask
) {
1262 if (qproc
->dump_mba_loaded
) {
1263 /* Try to reset ownership back to Q6 */
1264 q6v5_xfer_mem_ownership(qproc
, &qproc
->mpss_perm
,
1268 q6v5_mba_reclaim(qproc
);
1273 static int q6v5_start(struct rproc
*rproc
)
1275 struct q6v5
*qproc
= (struct q6v5
*)rproc
->priv
;
1279 ret
= q6v5_mba_load(qproc
);
1283 dev_info(qproc
->dev
, "MBA booted, loading mpss\n");
1285 ret
= q6v5_mpss_load(qproc
);
1289 ret
= qcom_q6v5_wait_for_start(&qproc
->q6v5
, msecs_to_jiffies(5000));
1290 if (ret
== -ETIMEDOUT
) {
1291 dev_err(qproc
->dev
, "start timed out\n");
1295 xfermemop_ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mba_perm
, true,
1296 false, qproc
->mba_phys
,
1300 "Failed to reclaim mba buffer system may become unstable\n");
1302 /* Reset Dump Segment Mask */
1303 qproc
->dump_segment_mask
= 0;
1304 qproc
->running
= true;
1309 q6v5_mba_reclaim(qproc
);
1314 static int q6v5_stop(struct rproc
*rproc
)
1316 struct q6v5
*qproc
= (struct q6v5
*)rproc
->priv
;
1319 qproc
->running
= false;
1321 ret
= qcom_q6v5_request_stop(&qproc
->q6v5
);
1322 if (ret
== -ETIMEDOUT
)
1323 dev_err(qproc
->dev
, "timed out on wait\n");
1325 q6v5_mba_reclaim(qproc
);
1330 static void *q6v5_da_to_va(struct rproc
*rproc
, u64 da
, size_t len
)
1332 struct q6v5
*qproc
= rproc
->priv
;
1335 offset
= da
- qproc
->mpss_reloc
;
1336 if (offset
< 0 || offset
+ len
> qproc
->mpss_size
)
1339 return qproc
->mpss_region
+ offset
;
1342 static int qcom_q6v5_register_dump_segments(struct rproc
*rproc
,
1343 const struct firmware
*mba_fw
)
1345 const struct firmware
*fw
;
1346 const struct elf32_phdr
*phdrs
;
1347 const struct elf32_phdr
*phdr
;
1348 const struct elf32_hdr
*ehdr
;
1349 struct q6v5
*qproc
= rproc
->priv
;
1353 ret
= request_firmware(&fw
, qproc
->hexagon_mdt_image
, qproc
->dev
);
1355 dev_err(qproc
->dev
, "unable to load %s\n",
1356 qproc
->hexagon_mdt_image
);
1360 ehdr
= (struct elf32_hdr
*)fw
->data
;
1361 phdrs
= (struct elf32_phdr
*)(ehdr
+ 1);
1362 qproc
->dump_complete_mask
= 0;
1364 for (i
= 0; i
< ehdr
->e_phnum
; i
++) {
1367 if (!q6v5_phdr_valid(phdr
))
1370 ret
= rproc_coredump_add_custom_segment(rproc
, phdr
->p_paddr
,
1372 qcom_q6v5_dump_segment
,
1377 qproc
->dump_complete_mask
|= BIT(i
);
1380 release_firmware(fw
);
1384 static const struct rproc_ops q6v5_ops
= {
1385 .start
= q6v5_start
,
1387 .da_to_va
= q6v5_da_to_va
,
1388 .parse_fw
= qcom_q6v5_register_dump_segments
,
1392 static void qcom_msa_handover(struct qcom_q6v5
*q6v5
)
1394 struct q6v5
*qproc
= container_of(q6v5
, struct q6v5
, q6v5
);
1396 q6v5_clk_disable(qproc
->dev
, qproc
->proxy_clks
,
1397 qproc
->proxy_clk_count
);
1398 q6v5_regulator_disable(qproc
, qproc
->proxy_regs
,
1399 qproc
->proxy_reg_count
);
1400 q6v5_pds_disable(qproc
, qproc
->proxy_pds
, qproc
->proxy_pd_count
);
1403 static int q6v5_init_mem(struct q6v5
*qproc
, struct platform_device
*pdev
)
1405 struct of_phandle_args args
;
1406 struct resource
*res
;
1409 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "qdsp6");
1410 qproc
->reg_base
= devm_ioremap_resource(&pdev
->dev
, res
);
1411 if (IS_ERR(qproc
->reg_base
))
1412 return PTR_ERR(qproc
->reg_base
);
1414 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "rmb");
1415 qproc
->rmb_base
= devm_ioremap_resource(&pdev
->dev
, res
);
1416 if (IS_ERR(qproc
->rmb_base
))
1417 return PTR_ERR(qproc
->rmb_base
);
1419 ret
= of_parse_phandle_with_fixed_args(pdev
->dev
.of_node
,
1420 "qcom,halt-regs", 3, 0, &args
);
1422 dev_err(&pdev
->dev
, "failed to parse qcom,halt-regs\n");
1426 qproc
->halt_map
= syscon_node_to_regmap(args
.np
);
1427 of_node_put(args
.np
);
1428 if (IS_ERR(qproc
->halt_map
))
1429 return PTR_ERR(qproc
->halt_map
);
1431 qproc
->halt_q6
= args
.args
[0];
1432 qproc
->halt_modem
= args
.args
[1];
1433 qproc
->halt_nc
= args
.args
[2];
1435 if (qproc
->has_halt_nav
) {
1436 struct platform_device
*nav_pdev
;
1438 ret
= of_parse_phandle_with_fixed_args(pdev
->dev
.of_node
,
1439 "qcom,halt-nav-regs",
1442 dev_err(&pdev
->dev
, "failed to parse halt-nav-regs\n");
1446 nav_pdev
= of_find_device_by_node(args
.np
);
1447 of_node_put(args
.np
);
1449 dev_err(&pdev
->dev
, "failed to get mss clock device\n");
1450 return -EPROBE_DEFER
;
1453 qproc
->halt_nav_map
= dev_get_regmap(&nav_pdev
->dev
, NULL
);
1454 if (!qproc
->halt_nav_map
) {
1455 dev_err(&pdev
->dev
, "failed to get map from device\n");
1458 qproc
->halt_nav
= args
.args
[0];
1460 ret
= of_parse_phandle_with_fixed_args(pdev
->dev
.of_node
,
1461 "qcom,halt-nav-regs",
1464 dev_err(&pdev
->dev
, "failed to parse halt-nav-regs\n");
1468 qproc
->conn_map
= syscon_node_to_regmap(args
.np
);
1469 of_node_put(args
.np
);
1470 if (IS_ERR(qproc
->conn_map
))
1471 return PTR_ERR(qproc
->conn_map
);
1473 qproc
->conn_box
= args
.args
[0];
1479 static int q6v5_init_clocks(struct device
*dev
, struct clk
**clks
,
1487 for (i
= 0; clk_names
[i
]; i
++) {
1488 clks
[i
] = devm_clk_get(dev
, clk_names
[i
]);
1489 if (IS_ERR(clks
[i
])) {
1490 int rc
= PTR_ERR(clks
[i
]);
1492 if (rc
!= -EPROBE_DEFER
)
1493 dev_err(dev
, "Failed to get %s clock\n",
1502 static int q6v5_pds_attach(struct device
*dev
, struct device
**devs
,
1512 while (pd_names
[num_pds
])
1515 for (i
= 0; i
< num_pds
; i
++) {
1516 devs
[i
] = dev_pm_domain_attach_by_name(dev
, pd_names
[i
]);
1517 if (IS_ERR_OR_NULL(devs
[i
])) {
1518 ret
= PTR_ERR(devs
[i
]) ? : -ENODATA
;
1526 for (i
--; i
>= 0; i
--)
1527 dev_pm_domain_detach(devs
[i
], false);
1532 static void q6v5_pds_detach(struct q6v5
*qproc
, struct device
**pds
,
1537 for (i
= 0; i
< pd_count
; i
++)
1538 dev_pm_domain_detach(pds
[i
], false);
1541 static int q6v5_init_reset(struct q6v5
*qproc
)
1543 qproc
->mss_restart
= devm_reset_control_get_exclusive(qproc
->dev
,
1545 if (IS_ERR(qproc
->mss_restart
)) {
1546 dev_err(qproc
->dev
, "failed to acquire mss restart\n");
1547 return PTR_ERR(qproc
->mss_restart
);
1550 if (qproc
->has_alt_reset
|| qproc
->has_halt_nav
) {
1551 qproc
->pdc_reset
= devm_reset_control_get_exclusive(qproc
->dev
,
1553 if (IS_ERR(qproc
->pdc_reset
)) {
1554 dev_err(qproc
->dev
, "failed to acquire pdc reset\n");
1555 return PTR_ERR(qproc
->pdc_reset
);
1562 static int q6v5_alloc_memory_region(struct q6v5
*qproc
)
1564 struct device_node
*child
;
1565 struct device_node
*node
;
1569 child
= of_get_child_by_name(qproc
->dev
->of_node
, "mba");
1570 node
= of_parse_phandle(child
, "memory-region", 0);
1571 ret
= of_address_to_resource(node
, 0, &r
);
1573 dev_err(qproc
->dev
, "unable to resolve mba region\n");
1578 qproc
->mba_phys
= r
.start
;
1579 qproc
->mba_size
= resource_size(&r
);
1580 qproc
->mba_region
= devm_ioremap_wc(qproc
->dev
, qproc
->mba_phys
, qproc
->mba_size
);
1581 if (!qproc
->mba_region
) {
1582 dev_err(qproc
->dev
, "unable to map memory region: %pa+%zx\n",
1583 &r
.start
, qproc
->mba_size
);
1587 child
= of_get_child_by_name(qproc
->dev
->of_node
, "mpss");
1588 node
= of_parse_phandle(child
, "memory-region", 0);
1589 ret
= of_address_to_resource(node
, 0, &r
);
1591 dev_err(qproc
->dev
, "unable to resolve mpss region\n");
1596 qproc
->mpss_phys
= qproc
->mpss_reloc
= r
.start
;
1597 qproc
->mpss_size
= resource_size(&r
);
1598 qproc
->mpss_region
= devm_ioremap_wc(qproc
->dev
, qproc
->mpss_phys
, qproc
->mpss_size
);
1599 if (!qproc
->mpss_region
) {
1600 dev_err(qproc
->dev
, "unable to map memory region: %pa+%zx\n",
1601 &r
.start
, qproc
->mpss_size
);
1608 #if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY)
1610 /* Register IPA notification function */
1611 int qcom_register_ipa_notify(struct rproc
*rproc
, qcom_ipa_notify_t notify
,
1614 struct qcom_rproc_ipa_notify
*ipa_notify
;
1615 struct q6v5
*qproc
= rproc
->priv
;
1620 ipa_notify
= &qproc
->ipa_notify_subdev
;
1621 if (ipa_notify
->notify
)
1624 ipa_notify
->notify
= notify
;
1625 ipa_notify
->data
= data
;
1629 EXPORT_SYMBOL_GPL(qcom_register_ipa_notify
);
1631 /* Deregister IPA notification function */
1632 void qcom_deregister_ipa_notify(struct rproc
*rproc
)
1634 struct q6v5
*qproc
= rproc
->priv
;
1636 qproc
->ipa_notify_subdev
.notify
= NULL
;
1638 EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify
);
1639 #endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
1641 static int q6v5_probe(struct platform_device
*pdev
)
1643 const struct rproc_hexagon_res
*desc
;
1645 struct rproc
*rproc
;
1646 const char *mba_image
;
1649 desc
= of_device_get_match_data(&pdev
->dev
);
1653 if (desc
->need_mem_protection
&& !qcom_scm_is_available())
1654 return -EPROBE_DEFER
;
1656 mba_image
= desc
->hexagon_mba_image
;
1657 ret
= of_property_read_string_index(pdev
->dev
.of_node
, "firmware-name",
1659 if (ret
< 0 && ret
!= -EINVAL
)
1662 rproc
= rproc_alloc(&pdev
->dev
, pdev
->name
, &q6v5_ops
,
1663 mba_image
, sizeof(*qproc
));
1665 dev_err(&pdev
->dev
, "failed to allocate rproc\n");
1669 rproc
->auto_boot
= false;
1671 qproc
= (struct q6v5
*)rproc
->priv
;
1672 qproc
->dev
= &pdev
->dev
;
1673 qproc
->rproc
= rproc
;
1674 qproc
->hexagon_mdt_image
= "modem.mdt";
1675 ret
= of_property_read_string_index(pdev
->dev
.of_node
, "firmware-name",
1676 1, &qproc
->hexagon_mdt_image
);
1677 if (ret
< 0 && ret
!= -EINVAL
)
1680 platform_set_drvdata(pdev
, qproc
);
1682 qproc
->has_halt_nav
= desc
->has_halt_nav
;
1683 ret
= q6v5_init_mem(qproc
, pdev
);
1687 ret
= q6v5_alloc_memory_region(qproc
);
1691 ret
= q6v5_init_clocks(&pdev
->dev
, qproc
->proxy_clks
,
1692 desc
->proxy_clk_names
);
1694 dev_err(&pdev
->dev
, "Failed to get proxy clocks.\n");
1697 qproc
->proxy_clk_count
= ret
;
1699 ret
= q6v5_init_clocks(&pdev
->dev
, qproc
->reset_clks
,
1700 desc
->reset_clk_names
);
1702 dev_err(&pdev
->dev
, "Failed to get reset clocks.\n");
1705 qproc
->reset_clk_count
= ret
;
1707 ret
= q6v5_init_clocks(&pdev
->dev
, qproc
->active_clks
,
1708 desc
->active_clk_names
);
1710 dev_err(&pdev
->dev
, "Failed to get active clocks.\n");
1713 qproc
->active_clk_count
= ret
;
1715 ret
= q6v5_regulator_init(&pdev
->dev
, qproc
->proxy_regs
,
1716 desc
->proxy_supply
);
1718 dev_err(&pdev
->dev
, "Failed to get proxy regulators.\n");
1721 qproc
->proxy_reg_count
= ret
;
1723 ret
= q6v5_regulator_init(&pdev
->dev
, qproc
->active_regs
,
1724 desc
->active_supply
);
1726 dev_err(&pdev
->dev
, "Failed to get active regulators.\n");
1729 qproc
->active_reg_count
= ret
;
1731 ret
= q6v5_pds_attach(&pdev
->dev
, qproc
->active_pds
,
1732 desc
->active_pd_names
);
1734 dev_err(&pdev
->dev
, "Failed to attach active power domains\n");
1737 qproc
->active_pd_count
= ret
;
1739 ret
= q6v5_pds_attach(&pdev
->dev
, qproc
->proxy_pds
,
1740 desc
->proxy_pd_names
);
1742 dev_err(&pdev
->dev
, "Failed to init power domains\n");
1743 goto detach_active_pds
;
1745 qproc
->proxy_pd_count
= ret
;
1747 qproc
->has_alt_reset
= desc
->has_alt_reset
;
1748 ret
= q6v5_init_reset(qproc
);
1750 goto detach_proxy_pds
;
1752 qproc
->version
= desc
->version
;
1753 qproc
->need_mem_protection
= desc
->need_mem_protection
;
1755 ret
= qcom_q6v5_init(&qproc
->q6v5
, pdev
, rproc
, MPSS_CRASH_REASON_SMEM
,
1758 goto detach_proxy_pds
;
1760 qproc
->mpss_perm
= BIT(QCOM_SCM_VMID_HLOS
);
1761 qproc
->mba_perm
= BIT(QCOM_SCM_VMID_HLOS
);
1762 qcom_add_glink_subdev(rproc
, &qproc
->glink_subdev
);
1763 qcom_add_smd_subdev(rproc
, &qproc
->smd_subdev
);
1764 qcom_add_ssr_subdev(rproc
, &qproc
->ssr_subdev
, "mpss");
1765 qcom_add_ipa_notify_subdev(rproc
, &qproc
->ipa_notify_subdev
);
1766 qproc
->sysmon
= qcom_add_sysmon_subdev(rproc
, "modem", 0x12);
1767 if (IS_ERR(qproc
->sysmon
)) {
1768 ret
= PTR_ERR(qproc
->sysmon
);
1769 goto detach_proxy_pds
;
1772 ret
= rproc_add(rproc
);
1774 goto detach_proxy_pds
;
1779 qcom_remove_ipa_notify_subdev(qproc
->rproc
, &qproc
->ipa_notify_subdev
);
1780 q6v5_pds_detach(qproc
, qproc
->proxy_pds
, qproc
->proxy_pd_count
);
1782 q6v5_pds_detach(qproc
, qproc
->active_pds
, qproc
->active_pd_count
);
1789 static int q6v5_remove(struct platform_device
*pdev
)
1791 struct q6v5
*qproc
= platform_get_drvdata(pdev
);
1793 rproc_del(qproc
->rproc
);
1795 qcom_remove_sysmon_subdev(qproc
->sysmon
);
1796 qcom_remove_ipa_notify_subdev(qproc
->rproc
, &qproc
->ipa_notify_subdev
);
1797 qcom_remove_glink_subdev(qproc
->rproc
, &qproc
->glink_subdev
);
1798 qcom_remove_smd_subdev(qproc
->rproc
, &qproc
->smd_subdev
);
1799 qcom_remove_ssr_subdev(qproc
->rproc
, &qproc
->ssr_subdev
);
1801 q6v5_pds_detach(qproc
, qproc
->active_pds
, qproc
->active_pd_count
);
1802 q6v5_pds_detach(qproc
, qproc
->proxy_pds
, qproc
->proxy_pd_count
);
1804 rproc_free(qproc
->rproc
);
1809 static const struct rproc_hexagon_res sc7180_mss
= {
1810 .hexagon_mba_image
= "mba.mbn",
1811 .proxy_clk_names
= (char*[]){
1815 .reset_clk_names
= (char*[]){
1821 .active_clk_names
= (char*[]){
1828 .active_pd_names
= (char*[]){
1832 .proxy_pd_names
= (char*[]){
1838 .need_mem_protection
= true,
1839 .has_alt_reset
= false,
1840 .has_halt_nav
= true,
1841 .version
= MSS_SC7180
,
1844 static const struct rproc_hexagon_res sdm845_mss
= {
1845 .hexagon_mba_image
= "mba.mbn",
1846 .proxy_clk_names
= (char*[]){
1851 .reset_clk_names
= (char*[]){
1856 .active_clk_names
= (char*[]){
1863 .active_pd_names
= (char*[]){
1867 .proxy_pd_names
= (char*[]){
1873 .need_mem_protection
= true,
1874 .has_alt_reset
= true,
1875 .has_halt_nav
= false,
1876 .version
= MSS_SDM845
,
1879 static const struct rproc_hexagon_res msm8998_mss
= {
1880 .hexagon_mba_image
= "mba.mbn",
1881 .proxy_clk_names
= (char*[]){
1887 .active_clk_names
= (char*[]){
1895 .proxy_pd_names
= (char*[]){
1900 .need_mem_protection
= true,
1901 .has_alt_reset
= false,
1902 .has_halt_nav
= false,
1903 .version
= MSS_MSM8998
,
1906 static const struct rproc_hexagon_res msm8996_mss
= {
1907 .hexagon_mba_image
= "mba.mbn",
1908 .proxy_supply
= (struct qcom_mss_reg_res
[]) {
1915 .proxy_clk_names
= (char*[]){
1921 .active_clk_names
= (char*[]){
1930 .need_mem_protection
= true,
1931 .has_alt_reset
= false,
1932 .has_halt_nav
= false,
1933 .version
= MSS_MSM8996
,
1936 static const struct rproc_hexagon_res msm8916_mss
= {
1937 .hexagon_mba_image
= "mba.mbn",
1938 .proxy_supply
= (struct qcom_mss_reg_res
[]) {
1953 .proxy_clk_names
= (char*[]){
1957 .active_clk_names
= (char*[]){
1963 .need_mem_protection
= false,
1964 .has_alt_reset
= false,
1965 .has_halt_nav
= false,
1966 .version
= MSS_MSM8916
,
1969 static const struct rproc_hexagon_res msm8974_mss
= {
1970 .hexagon_mba_image
= "mba.b00",
1971 .proxy_supply
= (struct qcom_mss_reg_res
[]) {
1986 .active_supply
= (struct qcom_mss_reg_res
[]) {
1994 .proxy_clk_names
= (char*[]){
1998 .active_clk_names
= (char*[]){
2004 .need_mem_protection
= false,
2005 .has_alt_reset
= false,
2006 .has_halt_nav
= false,
2007 .version
= MSS_MSM8974
,
2010 static const struct of_device_id q6v5_of_match
[] = {
2011 { .compatible
= "qcom,q6v5-pil", .data
= &msm8916_mss
},
2012 { .compatible
= "qcom,msm8916-mss-pil", .data
= &msm8916_mss
},
2013 { .compatible
= "qcom,msm8974-mss-pil", .data
= &msm8974_mss
},
2014 { .compatible
= "qcom,msm8996-mss-pil", .data
= &msm8996_mss
},
2015 { .compatible
= "qcom,msm8998-mss-pil", .data
= &msm8998_mss
},
2016 { .compatible
= "qcom,sc7180-mss-pil", .data
= &sc7180_mss
},
2017 { .compatible
= "qcom,sdm845-mss-pil", .data
= &sdm845_mss
},
2020 MODULE_DEVICE_TABLE(of
, q6v5_of_match
);
2022 static struct platform_driver q6v5_driver
= {
2023 .probe
= q6v5_probe
,
2024 .remove
= q6v5_remove
,
2026 .name
= "qcom-q6v5-mss",
2027 .of_match_table
= q6v5_of_match
,
2030 module_platform_driver(q6v5_driver
);
2032 MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
2033 MODULE_LICENSE("GPL v2");