1 // SPDX-License-Identifier: GPL-2.0-only
3 * Qualcomm self-authenticating modem subsystem remoteproc driver
5 * Copyright (C) 2016 Linaro Ltd.
6 * Copyright (C) 2014 Sony Mobile Communications AB
7 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel.h>
15 #include <linux/mfd/syscon.h>
16 #include <linux/module.h>
17 #include <linux/of_address.h>
18 #include <linux/of_device.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_domain.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/regmap.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/remoteproc.h>
25 #include <linux/reset.h>
26 #include <linux/soc/qcom/mdt_loader.h>
27 #include <linux/iopoll.h>
29 #include "remoteproc_internal.h"
30 #include "qcom_common.h"
31 #include "qcom_q6v5.h"
33 #include <linux/qcom_scm.h>
35 #define MPSS_CRASH_REASON_SMEM 421
37 /* RMB Status Register Values */
38 #define RMB_PBL_SUCCESS 0x1
40 #define RMB_MBA_XPU_UNLOCKED 0x1
41 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2
42 #define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3
43 #define RMB_MBA_AUTH_COMPLETE 0x4
45 /* PBL/MBA interface registers */
46 #define RMB_MBA_IMAGE_REG 0x00
47 #define RMB_PBL_STATUS_REG 0x04
48 #define RMB_MBA_COMMAND_REG 0x08
49 #define RMB_MBA_STATUS_REG 0x0C
50 #define RMB_PMI_META_DATA_REG 0x10
51 #define RMB_PMI_CODE_START_REG 0x14
52 #define RMB_PMI_CODE_LENGTH_REG 0x18
53 #define RMB_MBA_MSS_STATUS 0x40
54 #define RMB_MBA_ALT_RESET 0x44
56 #define RMB_CMD_META_DATA_READY 0x1
57 #define RMB_CMD_LOAD_READY 0x2
59 /* QDSP6SS Register Offsets */
60 #define QDSP6SS_RESET_REG 0x014
61 #define QDSP6SS_GFMUX_CTL_REG 0x020
62 #define QDSP6SS_PWR_CTL_REG 0x030
63 #define QDSP6SS_MEM_PWR_CTL 0x0B0
64 #define QDSP6V6SS_MEM_PWR_CTL 0x034
65 #define QDSP6SS_STRAP_ACC 0x110
67 /* AXI Halt Register Offsets */
68 #define AXI_HALTREQ_REG 0x0
69 #define AXI_HALTACK_REG 0x4
70 #define AXI_IDLE_REG 0x8
72 #define HALT_ACK_TIMEOUT_MS 100
75 #define Q6SS_STOP_CORE BIT(0)
76 #define Q6SS_CORE_ARES BIT(1)
77 #define Q6SS_BUS_ARES_ENABLE BIT(2)
79 /* QDSP6SS_GFMUX_CTL */
80 #define Q6SS_CLK_ENABLE BIT(1)
83 #define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
84 #define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
85 #define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
86 #define Q6SS_L2TAG_SLP_NRET_N BIT(16)
87 #define Q6SS_ETB_SLP_NRET_N BIT(17)
88 #define Q6SS_L2DATA_STBY_N BIT(18)
89 #define Q6SS_SLP_RET_N BIT(19)
90 #define Q6SS_CLAMP_IO BIT(20)
91 #define QDSS_BHS_ON BIT(21)
92 #define QDSS_LDO_BYP BIT(22)
94 /* QDSP6v56 parameters */
95 #define QDSP6v56_LDO_BYP BIT(25)
96 #define QDSP6v56_BHS_ON BIT(24)
97 #define QDSP6v56_CLAMP_WL BIT(21)
98 #define QDSP6v56_CLAMP_QMC_MEM BIT(22)
99 #define HALT_CHECK_MAX_LOOPS 200
100 #define QDSP6SS_XO_CBCR 0x0038
101 #define QDSP6SS_ACC_OVERRIDE_VAL 0x20
103 /* QDSP6v65 parameters */
104 #define QDSP6SS_SLEEP 0x3C
105 #define QDSP6SS_BOOT_CORE_START 0x400
106 #define QDSP6SS_BOOT_CMD 0x404
107 #define SLEEP_CHECK_MAX_LOOPS 200
108 #define BOOT_FSM_TIMEOUT 10000
111 struct regulator
*reg
;
116 struct qcom_mss_reg_res
{
122 struct rproc_hexagon_res
{
123 const char *hexagon_mba_image
;
124 struct qcom_mss_reg_res
*proxy_supply
;
125 struct qcom_mss_reg_res
*active_supply
;
126 char **proxy_clk_names
;
127 char **reset_clk_names
;
128 char **active_clk_names
;
129 char **active_pd_names
;
130 char **proxy_pd_names
;
132 bool need_mem_protection
;
140 void __iomem
*reg_base
;
141 void __iomem
*rmb_base
;
143 struct regmap
*halt_map
;
148 struct reset_control
*mss_restart
;
149 struct reset_control
*pdc_reset
;
151 struct qcom_q6v5 q6v5
;
153 struct clk
*active_clks
[8];
154 struct clk
*reset_clks
[4];
155 struct clk
*proxy_clks
[4];
156 struct device
*active_pds
[1];
157 struct device
*proxy_pds
[3];
158 int active_clk_count
;
164 struct reg_info active_regs
[1];
165 struct reg_info proxy_regs
[3];
166 int active_reg_count
;
171 bool dump_mba_loaded
;
172 unsigned long dump_segment_mask
;
173 unsigned long dump_complete_mask
;
175 phys_addr_t mba_phys
;
179 phys_addr_t mpss_phys
;
180 phys_addr_t mpss_reloc
;
184 struct qcom_rproc_glink glink_subdev
;
185 struct qcom_rproc_subdev smd_subdev
;
186 struct qcom_rproc_ssr ssr_subdev
;
187 struct qcom_sysmon
*sysmon
;
188 bool need_mem_protection
;
192 const char *hexagon_mdt_image
;
204 static int q6v5_regulator_init(struct device
*dev
, struct reg_info
*regs
,
205 const struct qcom_mss_reg_res
*reg_res
)
213 for (i
= 0; reg_res
[i
].supply
; i
++) {
214 regs
[i
].reg
= devm_regulator_get(dev
, reg_res
[i
].supply
);
215 if (IS_ERR(regs
[i
].reg
)) {
216 rc
= PTR_ERR(regs
[i
].reg
);
217 if (rc
!= -EPROBE_DEFER
)
218 dev_err(dev
, "Failed to get %s\n regulator",
223 regs
[i
].uV
= reg_res
[i
].uV
;
224 regs
[i
].uA
= reg_res
[i
].uA
;
230 static int q6v5_regulator_enable(struct q6v5
*qproc
,
231 struct reg_info
*regs
, int count
)
236 for (i
= 0; i
< count
; i
++) {
237 if (regs
[i
].uV
> 0) {
238 ret
= regulator_set_voltage(regs
[i
].reg
,
239 regs
[i
].uV
, INT_MAX
);
242 "Failed to request voltage for %d.\n",
248 if (regs
[i
].uA
> 0) {
249 ret
= regulator_set_load(regs
[i
].reg
,
253 "Failed to set regulator mode\n");
258 ret
= regulator_enable(regs
[i
].reg
);
260 dev_err(qproc
->dev
, "Regulator enable failed\n");
267 for (; i
>= 0; i
--) {
269 regulator_set_voltage(regs
[i
].reg
, 0, INT_MAX
);
272 regulator_set_load(regs
[i
].reg
, 0);
274 regulator_disable(regs
[i
].reg
);
280 static void q6v5_regulator_disable(struct q6v5
*qproc
,
281 struct reg_info
*regs
, int count
)
285 for (i
= 0; i
< count
; i
++) {
287 regulator_set_voltage(regs
[i
].reg
, 0, INT_MAX
);
290 regulator_set_load(regs
[i
].reg
, 0);
292 regulator_disable(regs
[i
].reg
);
/*
 * Prepare and enable @count clocks from @clks. On failure, every clock
 * enabled so far is disabled again and the errno is returned.
 */
static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int rc;
	int i;

	for (i = 0; i < count; i++) {
		rc = clk_prepare_enable(clks[i]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	/* Unwind only the clocks that were successfully enabled */
	for (i--; i >= 0; i--)
		clk_disable_unprepare(clks[i]);

	return rc;
}
/* Disable and unprepare the first @count clocks in @clks. */
static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int idx;

	for (idx = 0; idx < count; idx++)
		clk_disable_unprepare(clks[idx]);
}
327 static int q6v5_pds_enable(struct q6v5
*qproc
, struct device
**pds
,
333 for (i
= 0; i
< pd_count
; i
++) {
334 dev_pm_genpd_set_performance_state(pds
[i
], INT_MAX
);
335 ret
= pm_runtime_get_sync(pds
[i
]);
337 goto unroll_pd_votes
;
343 for (i
--; i
>= 0; i
--) {
344 dev_pm_genpd_set_performance_state(pds
[i
], 0);
345 pm_runtime_put(pds
[i
]);
351 static void q6v5_pds_disable(struct q6v5
*qproc
, struct device
**pds
,
356 for (i
= 0; i
< pd_count
; i
++) {
357 dev_pm_genpd_set_performance_state(pds
[i
], 0);
358 pm_runtime_put(pds
[i
]);
362 static int q6v5_xfer_mem_ownership(struct q6v5
*qproc
, int *current_perm
,
363 bool remote_owner
, phys_addr_t addr
,
366 struct qcom_scm_vmperm next
;
368 if (!qproc
->need_mem_protection
)
370 if (remote_owner
&& *current_perm
== BIT(QCOM_SCM_VMID_MSS_MSA
))
372 if (!remote_owner
&& *current_perm
== BIT(QCOM_SCM_VMID_HLOS
))
375 next
.vmid
= remote_owner
? QCOM_SCM_VMID_MSS_MSA
: QCOM_SCM_VMID_HLOS
;
376 next
.perm
= remote_owner
? QCOM_SCM_PERM_RW
: QCOM_SCM_PERM_RWX
;
378 return qcom_scm_assign_mem(addr
, ALIGN(size
, SZ_4K
),
379 current_perm
, &next
, 1);
382 static int q6v5_load(struct rproc
*rproc
, const struct firmware
*fw
)
384 struct q6v5
*qproc
= rproc
->priv
;
386 memcpy(qproc
->mba_region
, fw
->data
, fw
->size
);
391 static int q6v5_reset_assert(struct q6v5
*qproc
)
395 if (qproc
->has_alt_reset
) {
396 reset_control_assert(qproc
->pdc_reset
);
397 ret
= reset_control_reset(qproc
->mss_restart
);
398 reset_control_deassert(qproc
->pdc_reset
);
400 ret
= reset_control_assert(qproc
->mss_restart
);
406 static int q6v5_reset_deassert(struct q6v5
*qproc
)
410 if (qproc
->has_alt_reset
) {
411 reset_control_assert(qproc
->pdc_reset
);
412 writel(1, qproc
->rmb_base
+ RMB_MBA_ALT_RESET
);
413 ret
= reset_control_reset(qproc
->mss_restart
);
414 writel(0, qproc
->rmb_base
+ RMB_MBA_ALT_RESET
);
415 reset_control_deassert(qproc
->pdc_reset
);
417 ret
= reset_control_deassert(qproc
->mss_restart
);
423 static int q6v5_rmb_pbl_wait(struct q6v5
*qproc
, int ms
)
425 unsigned long timeout
;
428 timeout
= jiffies
+ msecs_to_jiffies(ms
);
430 val
= readl(qproc
->rmb_base
+ RMB_PBL_STATUS_REG
);
434 if (time_after(jiffies
, timeout
))
443 static int q6v5_rmb_mba_wait(struct q6v5
*qproc
, u32 status
, int ms
)
446 unsigned long timeout
;
449 timeout
= jiffies
+ msecs_to_jiffies(ms
);
451 val
= readl(qproc
->rmb_base
+ RMB_MBA_STATUS_REG
);
457 else if (status
&& val
== status
)
460 if (time_after(jiffies
, timeout
))
469 static int q6v5proc_reset(struct q6v5
*qproc
)
475 if (qproc
->version
== MSS_SDM845
) {
476 val
= readl(qproc
->reg_base
+ QDSP6SS_SLEEP
);
478 writel(val
, qproc
->reg_base
+ QDSP6SS_SLEEP
);
480 ret
= readl_poll_timeout(qproc
->reg_base
+ QDSP6SS_SLEEP
,
481 val
, !(val
& BIT(31)), 1,
482 SLEEP_CHECK_MAX_LOOPS
);
484 dev_err(qproc
->dev
, "QDSP6SS Sleep clock timed out\n");
488 /* De-assert QDSP6 stop core */
489 writel(1, qproc
->reg_base
+ QDSP6SS_BOOT_CORE_START
);
490 /* Trigger boot FSM */
491 writel(1, qproc
->reg_base
+ QDSP6SS_BOOT_CMD
);
493 ret
= readl_poll_timeout(qproc
->rmb_base
+ RMB_MBA_MSS_STATUS
,
494 val
, (val
& BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT
);
496 dev_err(qproc
->dev
, "Boot FSM failed to complete.\n");
497 /* Reset the modem so that boot FSM is in reset state */
498 q6v5_reset_deassert(qproc
);
503 } else if (qproc
->version
== MSS_MSM8996
||
504 qproc
->version
== MSS_MSM8998
) {
507 /* Override the ACC value if required */
508 writel(QDSP6SS_ACC_OVERRIDE_VAL
,
509 qproc
->reg_base
+ QDSP6SS_STRAP_ACC
);
511 /* Assert resets, stop core */
512 val
= readl(qproc
->reg_base
+ QDSP6SS_RESET_REG
);
513 val
|= Q6SS_CORE_ARES
| Q6SS_BUS_ARES_ENABLE
| Q6SS_STOP_CORE
;
514 writel(val
, qproc
->reg_base
+ QDSP6SS_RESET_REG
);
516 /* BHS require xo cbcr to be enabled */
517 val
= readl(qproc
->reg_base
+ QDSP6SS_XO_CBCR
);
519 writel(val
, qproc
->reg_base
+ QDSP6SS_XO_CBCR
);
521 /* Read CLKOFF bit to go low indicating CLK is enabled */
522 ret
= readl_poll_timeout(qproc
->reg_base
+ QDSP6SS_XO_CBCR
,
523 val
, !(val
& BIT(31)), 1,
524 HALT_CHECK_MAX_LOOPS
);
527 "xo cbcr enabling timed out (rc:%d)\n", ret
);
530 /* Enable power block headswitch and wait for it to stabilize */
531 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
532 val
|= QDSP6v56_BHS_ON
;
533 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
534 val
|= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
537 /* Put LDO in bypass mode */
538 val
|= QDSP6v56_LDO_BYP
;
539 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
541 /* Deassert QDSP6 compiler memory clamp */
542 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
543 val
&= ~QDSP6v56_CLAMP_QMC_MEM
;
544 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
546 /* Deassert memory peripheral sleep and L2 memory standby */
547 val
|= Q6SS_L2DATA_STBY_N
| Q6SS_SLP_RET_N
;
548 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
550 /* Turn on L1, L2, ETB and JU memories 1 at a time */
551 if (qproc
->version
== MSS_MSM8996
) {
552 mem_pwr_ctl
= QDSP6SS_MEM_PWR_CTL
;
556 mem_pwr_ctl
= QDSP6V6SS_MEM_PWR_CTL
;
559 val
= readl(qproc
->reg_base
+ mem_pwr_ctl
);
560 for (; i
>= 0; i
--) {
562 writel(val
, qproc
->reg_base
+ mem_pwr_ctl
);
564 * Read back value to ensure the write is done then
565 * wait for 1us for both memory peripheral and data
568 val
|= readl(qproc
->reg_base
+ mem_pwr_ctl
);
571 /* Remove word line clamp */
572 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
573 val
&= ~QDSP6v56_CLAMP_WL
;
574 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
576 /* Assert resets, stop core */
577 val
= readl(qproc
->reg_base
+ QDSP6SS_RESET_REG
);
578 val
|= Q6SS_CORE_ARES
| Q6SS_BUS_ARES_ENABLE
| Q6SS_STOP_CORE
;
579 writel(val
, qproc
->reg_base
+ QDSP6SS_RESET_REG
);
581 /* Enable power block headswitch and wait for it to stabilize */
582 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
583 val
|= QDSS_BHS_ON
| QDSS_LDO_BYP
;
584 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
585 val
|= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
588 * Turn on memories. L2 banks should be done individually
589 * to minimize inrush current.
591 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
592 val
|= Q6SS_SLP_RET_N
| Q6SS_L2TAG_SLP_NRET_N
|
593 Q6SS_ETB_SLP_NRET_N
| Q6SS_L2DATA_STBY_N
;
594 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
595 val
|= Q6SS_L2DATA_SLP_NRET_N_2
;
596 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
597 val
|= Q6SS_L2DATA_SLP_NRET_N_1
;
598 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
599 val
|= Q6SS_L2DATA_SLP_NRET_N_0
;
600 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
602 /* Remove IO clamp */
603 val
&= ~Q6SS_CLAMP_IO
;
604 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
606 /* Bring core out of reset */
607 val
= readl(qproc
->reg_base
+ QDSP6SS_RESET_REG
);
608 val
&= ~Q6SS_CORE_ARES
;
609 writel(val
, qproc
->reg_base
+ QDSP6SS_RESET_REG
);
611 /* Turn on core clock */
612 val
= readl(qproc
->reg_base
+ QDSP6SS_GFMUX_CTL_REG
);
613 val
|= Q6SS_CLK_ENABLE
;
614 writel(val
, qproc
->reg_base
+ QDSP6SS_GFMUX_CTL_REG
);
616 /* Start core execution */
617 val
= readl(qproc
->reg_base
+ QDSP6SS_RESET_REG
);
618 val
&= ~Q6SS_STOP_CORE
;
619 writel(val
, qproc
->reg_base
+ QDSP6SS_RESET_REG
);
622 /* Wait for PBL status */
623 ret
= q6v5_rmb_pbl_wait(qproc
, 1000);
624 if (ret
== -ETIMEDOUT
) {
625 dev_err(qproc
->dev
, "PBL boot timed out\n");
626 } else if (ret
!= RMB_PBL_SUCCESS
) {
627 dev_err(qproc
->dev
, "PBL returned unexpected status %d\n", ret
);
636 static void q6v5proc_halt_axi_port(struct q6v5
*qproc
,
637 struct regmap
*halt_map
,
640 unsigned long timeout
;
644 /* Check if we're already idle */
645 ret
= regmap_read(halt_map
, offset
+ AXI_IDLE_REG
, &val
);
649 /* Assert halt request */
650 regmap_write(halt_map
, offset
+ AXI_HALTREQ_REG
, 1);
653 timeout
= jiffies
+ msecs_to_jiffies(HALT_ACK_TIMEOUT_MS
);
655 ret
= regmap_read(halt_map
, offset
+ AXI_HALTACK_REG
, &val
);
656 if (ret
|| val
|| time_after(jiffies
, timeout
))
662 ret
= regmap_read(halt_map
, offset
+ AXI_IDLE_REG
, &val
);
664 dev_err(qproc
->dev
, "port failed halt\n");
666 /* Clear halt request (port will remain halted until reset) */
667 regmap_write(halt_map
, offset
+ AXI_HALTREQ_REG
, 0);
670 static int q6v5_mpss_init_image(struct q6v5
*qproc
, const struct firmware
*fw
)
672 unsigned long dma_attrs
= DMA_ATTR_FORCE_CONTIGUOUS
;
681 metadata
= qcom_mdt_read_metadata(fw
, &size
);
682 if (IS_ERR(metadata
))
683 return PTR_ERR(metadata
);
685 ptr
= dma_alloc_attrs(qproc
->dev
, size
, &phys
, GFP_KERNEL
, dma_attrs
);
688 dev_err(qproc
->dev
, "failed to allocate mdt buffer\n");
692 memcpy(ptr
, metadata
, size
);
694 /* Hypervisor mapping to access metadata by modem */
695 mdata_perm
= BIT(QCOM_SCM_VMID_HLOS
);
696 ret
= q6v5_xfer_mem_ownership(qproc
, &mdata_perm
, true, phys
, size
);
699 "assigning Q6 access to metadata failed: %d\n", ret
);
704 writel(phys
, qproc
->rmb_base
+ RMB_PMI_META_DATA_REG
);
705 writel(RMB_CMD_META_DATA_READY
, qproc
->rmb_base
+ RMB_MBA_COMMAND_REG
);
707 ret
= q6v5_rmb_mba_wait(qproc
, RMB_MBA_META_DATA_AUTH_SUCCESS
, 1000);
708 if (ret
== -ETIMEDOUT
)
709 dev_err(qproc
->dev
, "MPSS header authentication timed out\n");
711 dev_err(qproc
->dev
, "MPSS header authentication failed: %d\n", ret
);
713 /* Metadata authentication done, remove modem access */
714 xferop_ret
= q6v5_xfer_mem_ownership(qproc
, &mdata_perm
, false, phys
, size
);
717 "mdt buffer not reclaimed system may become unstable\n");
720 dma_free_attrs(qproc
->dev
, size
, ptr
, phys
, dma_attrs
);
723 return ret
< 0 ? ret
: 0;
726 static bool q6v5_phdr_valid(const struct elf32_phdr
*phdr
)
728 if (phdr
->p_type
!= PT_LOAD
)
731 if ((phdr
->p_flags
& QCOM_MDT_TYPE_MASK
) == QCOM_MDT_TYPE_HASH
)
740 static int q6v5_mba_load(struct q6v5
*qproc
)
745 qcom_q6v5_prepare(&qproc
->q6v5
);
747 ret
= q6v5_pds_enable(qproc
, qproc
->active_pds
, qproc
->active_pd_count
);
749 dev_err(qproc
->dev
, "failed to enable active power domains\n");
753 ret
= q6v5_pds_enable(qproc
, qproc
->proxy_pds
, qproc
->proxy_pd_count
);
755 dev_err(qproc
->dev
, "failed to enable proxy power domains\n");
756 goto disable_active_pds
;
759 ret
= q6v5_regulator_enable(qproc
, qproc
->proxy_regs
,
760 qproc
->proxy_reg_count
);
762 dev_err(qproc
->dev
, "failed to enable proxy supplies\n");
763 goto disable_proxy_pds
;
766 ret
= q6v5_clk_enable(qproc
->dev
, qproc
->proxy_clks
,
767 qproc
->proxy_clk_count
);
769 dev_err(qproc
->dev
, "failed to enable proxy clocks\n");
770 goto disable_proxy_reg
;
773 ret
= q6v5_regulator_enable(qproc
, qproc
->active_regs
,
774 qproc
->active_reg_count
);
776 dev_err(qproc
->dev
, "failed to enable supplies\n");
777 goto disable_proxy_clk
;
780 ret
= q6v5_clk_enable(qproc
->dev
, qproc
->reset_clks
,
781 qproc
->reset_clk_count
);
783 dev_err(qproc
->dev
, "failed to enable reset clocks\n");
787 ret
= q6v5_reset_deassert(qproc
);
789 dev_err(qproc
->dev
, "failed to deassert mss restart\n");
790 goto disable_reset_clks
;
793 ret
= q6v5_clk_enable(qproc
->dev
, qproc
->active_clks
,
794 qproc
->active_clk_count
);
796 dev_err(qproc
->dev
, "failed to enable clocks\n");
800 /* Assign MBA image access in DDR to q6 */
801 ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mba_perm
, true,
802 qproc
->mba_phys
, qproc
->mba_size
);
805 "assigning Q6 access to mba memory failed: %d\n", ret
);
806 goto disable_active_clks
;
809 writel(qproc
->mba_phys
, qproc
->rmb_base
+ RMB_MBA_IMAGE_REG
);
811 ret
= q6v5proc_reset(qproc
);
815 ret
= q6v5_rmb_mba_wait(qproc
, 0, 5000);
816 if (ret
== -ETIMEDOUT
) {
817 dev_err(qproc
->dev
, "MBA boot timed out\n");
819 } else if (ret
!= RMB_MBA_XPU_UNLOCKED
&&
820 ret
!= RMB_MBA_XPU_UNLOCKED_SCRIBBLED
) {
821 dev_err(qproc
->dev
, "MBA returned unexpected status %d\n", ret
);
826 qproc
->dump_mba_loaded
= true;
830 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_q6
);
831 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_modem
);
832 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_nc
);
835 xfermemop_ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mba_perm
, false,
840 "Failed to reclaim mba buffer, system may become unstable\n");
844 q6v5_clk_disable(qproc
->dev
, qproc
->active_clks
,
845 qproc
->active_clk_count
);
847 q6v5_reset_assert(qproc
);
849 q6v5_clk_disable(qproc
->dev
, qproc
->reset_clks
,
850 qproc
->reset_clk_count
);
852 q6v5_regulator_disable(qproc
, qproc
->active_regs
,
853 qproc
->active_reg_count
);
855 q6v5_clk_disable(qproc
->dev
, qproc
->proxy_clks
,
856 qproc
->proxy_clk_count
);
858 q6v5_regulator_disable(qproc
, qproc
->proxy_regs
,
859 qproc
->proxy_reg_count
);
861 q6v5_pds_disable(qproc
, qproc
->proxy_pds
, qproc
->proxy_pd_count
);
863 q6v5_pds_disable(qproc
, qproc
->active_pds
, qproc
->active_pd_count
);
865 qcom_q6v5_unprepare(&qproc
->q6v5
);
870 static void q6v5_mba_reclaim(struct q6v5
*qproc
)
875 qproc
->dump_mba_loaded
= false;
877 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_q6
);
878 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_modem
);
879 q6v5proc_halt_axi_port(qproc
, qproc
->halt_map
, qproc
->halt_nc
);
880 if (qproc
->version
== MSS_MSM8996
) {
882 * To avoid high MX current during LPASS/MSS restart.
884 val
= readl(qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
885 val
|= Q6SS_CLAMP_IO
| QDSP6v56_CLAMP_WL
|
886 QDSP6v56_CLAMP_QMC_MEM
;
887 writel(val
, qproc
->reg_base
+ QDSP6SS_PWR_CTL_REG
);
890 ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mpss_perm
,
891 false, qproc
->mpss_phys
,
895 q6v5_reset_assert(qproc
);
897 q6v5_clk_disable(qproc
->dev
, qproc
->reset_clks
,
898 qproc
->reset_clk_count
);
899 q6v5_clk_disable(qproc
->dev
, qproc
->active_clks
,
900 qproc
->active_clk_count
);
901 q6v5_regulator_disable(qproc
, qproc
->active_regs
,
902 qproc
->active_reg_count
);
903 q6v5_pds_disable(qproc
, qproc
->active_pds
, qproc
->active_pd_count
);
905 /* In case of failure or coredump scenario where reclaiming MBA memory
906 * could not happen reclaim it here.
908 ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mba_perm
, false,
913 ret
= qcom_q6v5_unprepare(&qproc
->q6v5
);
915 q6v5_pds_disable(qproc
, qproc
->proxy_pds
,
916 qproc
->proxy_pd_count
);
917 q6v5_clk_disable(qproc
->dev
, qproc
->proxy_clks
,
918 qproc
->proxy_clk_count
);
919 q6v5_regulator_disable(qproc
, qproc
->proxy_regs
,
920 qproc
->proxy_reg_count
);
924 static int q6v5_mpss_load(struct q6v5
*qproc
)
926 const struct elf32_phdr
*phdrs
;
927 const struct elf32_phdr
*phdr
;
928 const struct firmware
*seg_fw
;
929 const struct firmware
*fw
;
930 struct elf32_hdr
*ehdr
;
931 phys_addr_t mpss_reloc
;
932 phys_addr_t boot_addr
;
933 phys_addr_t min_addr
= PHYS_ADDR_MAX
;
934 phys_addr_t max_addr
= 0;
935 bool relocate
= false;
944 fw_name_len
= strlen(qproc
->hexagon_mdt_image
);
945 if (fw_name_len
<= 4)
948 fw_name
= kstrdup(qproc
->hexagon_mdt_image
, GFP_KERNEL
);
952 ret
= request_firmware(&fw
, fw_name
, qproc
->dev
);
954 dev_err(qproc
->dev
, "unable to load %s\n", fw_name
);
958 /* Initialize the RMB validator */
959 writel(0, qproc
->rmb_base
+ RMB_PMI_CODE_LENGTH_REG
);
961 ret
= q6v5_mpss_init_image(qproc
, fw
);
963 goto release_firmware
;
965 ehdr
= (struct elf32_hdr
*)fw
->data
;
966 phdrs
= (struct elf32_phdr
*)(ehdr
+ 1);
968 for (i
= 0; i
< ehdr
->e_phnum
; i
++) {
971 if (!q6v5_phdr_valid(phdr
))
974 if (phdr
->p_flags
& QCOM_MDT_RELOCATABLE
)
977 if (phdr
->p_paddr
< min_addr
)
978 min_addr
= phdr
->p_paddr
;
980 if (phdr
->p_paddr
+ phdr
->p_memsz
> max_addr
)
981 max_addr
= ALIGN(phdr
->p_paddr
+ phdr
->p_memsz
, SZ_4K
);
984 mpss_reloc
= relocate
? min_addr
: qproc
->mpss_phys
;
985 qproc
->mpss_reloc
= mpss_reloc
;
986 /* Load firmware segments */
987 for (i
= 0; i
< ehdr
->e_phnum
; i
++) {
990 if (!q6v5_phdr_valid(phdr
))
993 offset
= phdr
->p_paddr
- mpss_reloc
;
994 if (offset
< 0 || offset
+ phdr
->p_memsz
> qproc
->mpss_size
) {
995 dev_err(qproc
->dev
, "segment outside memory range\n");
997 goto release_firmware
;
1000 ptr
= qproc
->mpss_region
+ offset
;
1002 if (phdr
->p_filesz
&& phdr
->p_offset
< fw
->size
) {
1003 /* Firmware is large enough to be non-split */
1004 if (phdr
->p_offset
+ phdr
->p_filesz
> fw
->size
) {
1006 "failed to load segment %d from truncated file %s\n",
1009 goto release_firmware
;
1012 memcpy(ptr
, fw
->data
+ phdr
->p_offset
, phdr
->p_filesz
);
1013 } else if (phdr
->p_filesz
) {
1014 /* Replace "xxx.xxx" with "xxx.bxx" */
1015 sprintf(fw_name
+ fw_name_len
- 3, "b%02d", i
);
1016 ret
= request_firmware(&seg_fw
, fw_name
, qproc
->dev
);
1018 dev_err(qproc
->dev
, "failed to load %s\n", fw_name
);
1019 goto release_firmware
;
1022 memcpy(ptr
, seg_fw
->data
, seg_fw
->size
);
1024 release_firmware(seg_fw
);
1027 if (phdr
->p_memsz
> phdr
->p_filesz
) {
1028 memset(ptr
+ phdr
->p_filesz
, 0,
1029 phdr
->p_memsz
- phdr
->p_filesz
);
1031 size
+= phdr
->p_memsz
;
1034 /* Transfer ownership of modem ddr region to q6 */
1035 ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mpss_perm
, true,
1036 qproc
->mpss_phys
, qproc
->mpss_size
);
1039 "assigning Q6 access to mpss memory failed: %d\n", ret
);
1041 goto release_firmware
;
1044 boot_addr
= relocate
? qproc
->mpss_phys
: min_addr
;
1045 writel(boot_addr
, qproc
->rmb_base
+ RMB_PMI_CODE_START_REG
);
1046 writel(RMB_CMD_LOAD_READY
, qproc
->rmb_base
+ RMB_MBA_COMMAND_REG
);
1047 writel(size
, qproc
->rmb_base
+ RMB_PMI_CODE_LENGTH_REG
);
1049 ret
= q6v5_rmb_mba_wait(qproc
, RMB_MBA_AUTH_COMPLETE
, 10000);
1050 if (ret
== -ETIMEDOUT
)
1051 dev_err(qproc
->dev
, "MPSS authentication timed out\n");
1053 dev_err(qproc
->dev
, "MPSS authentication failed: %d\n", ret
);
1056 release_firmware(fw
);
1060 return ret
< 0 ? ret
: 0;
1063 static void qcom_q6v5_dump_segment(struct rproc
*rproc
,
1064 struct rproc_dump_segment
*segment
,
1068 struct q6v5
*qproc
= rproc
->priv
;
1069 unsigned long mask
= BIT((unsigned long)segment
->priv
);
1070 void *ptr
= rproc_da_to_va(rproc
, segment
->da
, segment
->size
);
1072 /* Unlock mba before copying segments */
1073 if (!qproc
->dump_mba_loaded
)
1074 ret
= q6v5_mba_load(qproc
);
1077 memset(dest
, 0xff, segment
->size
);
1079 memcpy(dest
, ptr
, segment
->size
);
1081 qproc
->dump_segment_mask
|= mask
;
1083 /* Reclaim mba after copying segments */
1084 if (qproc
->dump_segment_mask
== qproc
->dump_complete_mask
) {
1085 if (qproc
->dump_mba_loaded
)
1086 q6v5_mba_reclaim(qproc
);
1090 static int q6v5_start(struct rproc
*rproc
)
1092 struct q6v5
*qproc
= (struct q6v5
*)rproc
->priv
;
1096 ret
= q6v5_mba_load(qproc
);
1100 dev_info(qproc
->dev
, "MBA booted, loading mpss\n");
1102 ret
= q6v5_mpss_load(qproc
);
1106 ret
= qcom_q6v5_wait_for_start(&qproc
->q6v5
, msecs_to_jiffies(5000));
1107 if (ret
== -ETIMEDOUT
) {
1108 dev_err(qproc
->dev
, "start timed out\n");
1112 xfermemop_ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mba_perm
, false,
1117 "Failed to reclaim mba buffer system may become unstable\n");
1119 /* Reset Dump Segment Mask */
1120 qproc
->dump_segment_mask
= 0;
1121 qproc
->running
= true;
1126 xfermemop_ret
= q6v5_xfer_mem_ownership(qproc
, &qproc
->mpss_perm
,
1127 false, qproc
->mpss_phys
,
1129 WARN_ON(xfermemop_ret
);
1130 q6v5_mba_reclaim(qproc
);
1135 static int q6v5_stop(struct rproc
*rproc
)
1137 struct q6v5
*qproc
= (struct q6v5
*)rproc
->priv
;
1140 qproc
->running
= false;
1142 ret
= qcom_q6v5_request_stop(&qproc
->q6v5
);
1143 if (ret
== -ETIMEDOUT
)
1144 dev_err(qproc
->dev
, "timed out on wait\n");
1146 q6v5_mba_reclaim(qproc
);
1151 static void *q6v5_da_to_va(struct rproc
*rproc
, u64 da
, int len
)
1153 struct q6v5
*qproc
= rproc
->priv
;
1156 offset
= da
- qproc
->mpss_reloc
;
1157 if (offset
< 0 || offset
+ len
> qproc
->mpss_size
)
1160 return qproc
->mpss_region
+ offset
;
1163 static int qcom_q6v5_register_dump_segments(struct rproc
*rproc
,
1164 const struct firmware
*mba_fw
)
1166 const struct firmware
*fw
;
1167 const struct elf32_phdr
*phdrs
;
1168 const struct elf32_phdr
*phdr
;
1169 const struct elf32_hdr
*ehdr
;
1170 struct q6v5
*qproc
= rproc
->priv
;
1174 ret
= request_firmware(&fw
, qproc
->hexagon_mdt_image
, qproc
->dev
);
1176 dev_err(qproc
->dev
, "unable to load %s\n",
1177 qproc
->hexagon_mdt_image
);
1181 ehdr
= (struct elf32_hdr
*)fw
->data
;
1182 phdrs
= (struct elf32_phdr
*)(ehdr
+ 1);
1183 qproc
->dump_complete_mask
= 0;
1185 for (i
= 0; i
< ehdr
->e_phnum
; i
++) {
1188 if (!q6v5_phdr_valid(phdr
))
1191 ret
= rproc_coredump_add_custom_segment(rproc
, phdr
->p_paddr
,
1193 qcom_q6v5_dump_segment
,
1198 qproc
->dump_complete_mask
|= BIT(i
);
1201 release_firmware(fw
);
1205 static const struct rproc_ops q6v5_ops
= {
1206 .start
= q6v5_start
,
1208 .da_to_va
= q6v5_da_to_va
,
1209 .parse_fw
= qcom_q6v5_register_dump_segments
,
1213 static void qcom_msa_handover(struct qcom_q6v5
*q6v5
)
1215 struct q6v5
*qproc
= container_of(q6v5
, struct q6v5
, q6v5
);
1217 q6v5_clk_disable(qproc
->dev
, qproc
->proxy_clks
,
1218 qproc
->proxy_clk_count
);
1219 q6v5_regulator_disable(qproc
, qproc
->proxy_regs
,
1220 qproc
->proxy_reg_count
);
1221 q6v5_pds_disable(qproc
, qproc
->proxy_pds
, qproc
->proxy_pd_count
);
1224 static int q6v5_init_mem(struct q6v5
*qproc
, struct platform_device
*pdev
)
1226 struct of_phandle_args args
;
1227 struct resource
*res
;
1230 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "qdsp6");
1231 qproc
->reg_base
= devm_ioremap_resource(&pdev
->dev
, res
);
1232 if (IS_ERR(qproc
->reg_base
))
1233 return PTR_ERR(qproc
->reg_base
);
1235 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "rmb");
1236 qproc
->rmb_base
= devm_ioremap_resource(&pdev
->dev
, res
);
1237 if (IS_ERR(qproc
->rmb_base
))
1238 return PTR_ERR(qproc
->rmb_base
);
1240 ret
= of_parse_phandle_with_fixed_args(pdev
->dev
.of_node
,
1241 "qcom,halt-regs", 3, 0, &args
);
1243 dev_err(&pdev
->dev
, "failed to parse qcom,halt-regs\n");
1247 qproc
->halt_map
= syscon_node_to_regmap(args
.np
);
1248 of_node_put(args
.np
);
1249 if (IS_ERR(qproc
->halt_map
))
1250 return PTR_ERR(qproc
->halt_map
);
1252 qproc
->halt_q6
= args
.args
[0];
1253 qproc
->halt_modem
= args
.args
[1];
1254 qproc
->halt_nc
= args
.args
[2];
1259 static int q6v5_init_clocks(struct device
*dev
, struct clk
**clks
,
1267 for (i
= 0; clk_names
[i
]; i
++) {
1268 clks
[i
] = devm_clk_get(dev
, clk_names
[i
]);
1269 if (IS_ERR(clks
[i
])) {
1270 int rc
= PTR_ERR(clks
[i
]);
1272 if (rc
!= -EPROBE_DEFER
)
1273 dev_err(dev
, "Failed to get %s clock\n",
1282 static int q6v5_pds_attach(struct device
*dev
, struct device
**devs
,
1292 while (pd_names
[num_pds
])
1295 for (i
= 0; i
< num_pds
; i
++) {
1296 devs
[i
] = dev_pm_domain_attach_by_name(dev
, pd_names
[i
]);
1297 if (IS_ERR_OR_NULL(devs
[i
])) {
1298 ret
= PTR_ERR(devs
[i
]) ? : -ENODATA
;
1306 for (i
--; i
>= 0; i
--)
1307 dev_pm_domain_detach(devs
[i
], false);
1312 static void q6v5_pds_detach(struct q6v5
*qproc
, struct device
**pds
,
1317 for (i
= 0; i
< pd_count
; i
++)
1318 dev_pm_domain_detach(pds
[i
], false);
1321 static int q6v5_init_reset(struct q6v5
*qproc
)
1323 qproc
->mss_restart
= devm_reset_control_get_exclusive(qproc
->dev
,
1325 if (IS_ERR(qproc
->mss_restart
)) {
1326 dev_err(qproc
->dev
, "failed to acquire mss restart\n");
1327 return PTR_ERR(qproc
->mss_restart
);
1330 if (qproc
->has_alt_reset
) {
1331 qproc
->pdc_reset
= devm_reset_control_get_exclusive(qproc
->dev
,
1333 if (IS_ERR(qproc
->pdc_reset
)) {
1334 dev_err(qproc
->dev
, "failed to acquire pdc reset\n");
1335 return PTR_ERR(qproc
->pdc_reset
);
1342 static int q6v5_alloc_memory_region(struct q6v5
*qproc
)
1344 struct device_node
*child
;
1345 struct device_node
*node
;
1349 child
= of_get_child_by_name(qproc
->dev
->of_node
, "mba");
1350 node
= of_parse_phandle(child
, "memory-region", 0);
1351 ret
= of_address_to_resource(node
, 0, &r
);
1353 dev_err(qproc
->dev
, "unable to resolve mba region\n");
1358 qproc
->mba_phys
= r
.start
;
1359 qproc
->mba_size
= resource_size(&r
);
1360 qproc
->mba_region
= devm_ioremap_wc(qproc
->dev
, qproc
->mba_phys
, qproc
->mba_size
);
1361 if (!qproc
->mba_region
) {
1362 dev_err(qproc
->dev
, "unable to map memory region: %pa+%zx\n",
1363 &r
.start
, qproc
->mba_size
);
1367 child
= of_get_child_by_name(qproc
->dev
->of_node
, "mpss");
1368 node
= of_parse_phandle(child
, "memory-region", 0);
1369 ret
= of_address_to_resource(node
, 0, &r
);
1371 dev_err(qproc
->dev
, "unable to resolve mpss region\n");
1376 qproc
->mpss_phys
= qproc
->mpss_reloc
= r
.start
;
1377 qproc
->mpss_size
= resource_size(&r
);
1378 qproc
->mpss_region
= devm_ioremap_wc(qproc
->dev
, qproc
->mpss_phys
, qproc
->mpss_size
);
1379 if (!qproc
->mpss_region
) {
1380 dev_err(qproc
->dev
, "unable to map memory region: %pa+%zx\n",
1381 &r
.start
, qproc
->mpss_size
);
1388 static int q6v5_probe(struct platform_device
*pdev
)
1390 const struct rproc_hexagon_res
*desc
;
1392 struct rproc
*rproc
;
1393 const char *mba_image
;
1396 desc
= of_device_get_match_data(&pdev
->dev
);
1400 if (desc
->need_mem_protection
&& !qcom_scm_is_available())
1401 return -EPROBE_DEFER
;
1403 mba_image
= desc
->hexagon_mba_image
;
1404 ret
= of_property_read_string_index(pdev
->dev
.of_node
, "firmware-name",
1406 if (ret
< 0 && ret
!= -EINVAL
)
1409 rproc
= rproc_alloc(&pdev
->dev
, pdev
->name
, &q6v5_ops
,
1410 mba_image
, sizeof(*qproc
));
1412 dev_err(&pdev
->dev
, "failed to allocate rproc\n");
1416 rproc
->auto_boot
= false;
1418 qproc
= (struct q6v5
*)rproc
->priv
;
1419 qproc
->dev
= &pdev
->dev
;
1420 qproc
->rproc
= rproc
;
1421 qproc
->hexagon_mdt_image
= "modem.mdt";
1422 ret
= of_property_read_string_index(pdev
->dev
.of_node
, "firmware-name",
1423 1, &qproc
->hexagon_mdt_image
);
1424 if (ret
< 0 && ret
!= -EINVAL
)
1427 platform_set_drvdata(pdev
, qproc
);
1429 ret
= q6v5_init_mem(qproc
, pdev
);
1433 ret
= q6v5_alloc_memory_region(qproc
);
1437 ret
= q6v5_init_clocks(&pdev
->dev
, qproc
->proxy_clks
,
1438 desc
->proxy_clk_names
);
1440 dev_err(&pdev
->dev
, "Failed to get proxy clocks.\n");
1443 qproc
->proxy_clk_count
= ret
;
1445 ret
= q6v5_init_clocks(&pdev
->dev
, qproc
->reset_clks
,
1446 desc
->reset_clk_names
);
1448 dev_err(&pdev
->dev
, "Failed to get reset clocks.\n");
1451 qproc
->reset_clk_count
= ret
;
1453 ret
= q6v5_init_clocks(&pdev
->dev
, qproc
->active_clks
,
1454 desc
->active_clk_names
);
1456 dev_err(&pdev
->dev
, "Failed to get active clocks.\n");
1459 qproc
->active_clk_count
= ret
;
1461 ret
= q6v5_regulator_init(&pdev
->dev
, qproc
->proxy_regs
,
1462 desc
->proxy_supply
);
1464 dev_err(&pdev
->dev
, "Failed to get proxy regulators.\n");
1467 qproc
->proxy_reg_count
= ret
;
1469 ret
= q6v5_regulator_init(&pdev
->dev
, qproc
->active_regs
,
1470 desc
->active_supply
);
1472 dev_err(&pdev
->dev
, "Failed to get active regulators.\n");
1475 qproc
->active_reg_count
= ret
;
1477 ret
= q6v5_pds_attach(&pdev
->dev
, qproc
->active_pds
,
1478 desc
->active_pd_names
);
1480 dev_err(&pdev
->dev
, "Failed to attach active power domains\n");
1483 qproc
->active_pd_count
= ret
;
1485 ret
= q6v5_pds_attach(&pdev
->dev
, qproc
->proxy_pds
,
1486 desc
->proxy_pd_names
);
1488 dev_err(&pdev
->dev
, "Failed to init power domains\n");
1489 goto detach_active_pds
;
1491 qproc
->proxy_pd_count
= ret
;
1493 qproc
->has_alt_reset
= desc
->has_alt_reset
;
1494 ret
= q6v5_init_reset(qproc
);
1496 goto detach_proxy_pds
;
1498 qproc
->version
= desc
->version
;
1499 qproc
->need_mem_protection
= desc
->need_mem_protection
;
1501 ret
= qcom_q6v5_init(&qproc
->q6v5
, pdev
, rproc
, MPSS_CRASH_REASON_SMEM
,
1504 goto detach_proxy_pds
;
1506 qproc
->mpss_perm
= BIT(QCOM_SCM_VMID_HLOS
);
1507 qproc
->mba_perm
= BIT(QCOM_SCM_VMID_HLOS
);
1508 qcom_add_glink_subdev(rproc
, &qproc
->glink_subdev
);
1509 qcom_add_smd_subdev(rproc
, &qproc
->smd_subdev
);
1510 qcom_add_ssr_subdev(rproc
, &qproc
->ssr_subdev
, "mpss");
1511 qproc
->sysmon
= qcom_add_sysmon_subdev(rproc
, "modem", 0x12);
1512 if (IS_ERR(qproc
->sysmon
)) {
1513 ret
= PTR_ERR(qproc
->sysmon
);
1514 goto detach_proxy_pds
;
1517 ret
= rproc_add(rproc
);
1519 goto detach_proxy_pds
;
1524 q6v5_pds_detach(qproc
, qproc
->proxy_pds
, qproc
->proxy_pd_count
);
1526 q6v5_pds_detach(qproc
, qproc
->active_pds
, qproc
->active_pd_count
);
1533 static int q6v5_remove(struct platform_device
*pdev
)
1535 struct q6v5
*qproc
= platform_get_drvdata(pdev
);
1537 rproc_del(qproc
->rproc
);
1539 qcom_remove_sysmon_subdev(qproc
->sysmon
);
1540 qcom_remove_glink_subdev(qproc
->rproc
, &qproc
->glink_subdev
);
1541 qcom_remove_smd_subdev(qproc
->rproc
, &qproc
->smd_subdev
);
1542 qcom_remove_ssr_subdev(qproc
->rproc
, &qproc
->ssr_subdev
);
1544 q6v5_pds_detach(qproc
, qproc
->active_pds
, qproc
->active_pd_count
);
1545 q6v5_pds_detach(qproc
, qproc
->proxy_pds
, qproc
->proxy_pd_count
);
1547 rproc_free(qproc
->rproc
);
1552 static const struct rproc_hexagon_res sdm845_mss
= {
1553 .hexagon_mba_image
= "mba.mbn",
1554 .proxy_clk_names
= (char*[]){
1559 .reset_clk_names
= (char*[]){
1564 .active_clk_names
= (char*[]){
1571 .active_pd_names
= (char*[]){
1575 .proxy_pd_names
= (char*[]){
1581 .need_mem_protection
= true,
1582 .has_alt_reset
= true,
1583 .version
= MSS_SDM845
,
1586 static const struct rproc_hexagon_res msm8998_mss
= {
1587 .hexagon_mba_image
= "mba.mbn",
1588 .proxy_clk_names
= (char*[]){
1594 .active_clk_names
= (char*[]){
1603 .proxy_pd_names
= (char*[]){
1608 .need_mem_protection
= true,
1609 .has_alt_reset
= false,
1610 .version
= MSS_MSM8998
,
1613 static const struct rproc_hexagon_res msm8996_mss
= {
1614 .hexagon_mba_image
= "mba.mbn",
1615 .proxy_supply
= (struct qcom_mss_reg_res
[]) {
1622 .proxy_clk_names
= (char*[]){
1628 .active_clk_names
= (char*[]){
1637 .need_mem_protection
= true,
1638 .has_alt_reset
= false,
1639 .version
= MSS_MSM8996
,
1642 static const struct rproc_hexagon_res msm8916_mss
= {
1643 .hexagon_mba_image
= "mba.mbn",
1644 .proxy_supply
= (struct qcom_mss_reg_res
[]) {
1659 .proxy_clk_names
= (char*[]){
1663 .active_clk_names
= (char*[]){
1669 .need_mem_protection
= false,
1670 .has_alt_reset
= false,
1671 .version
= MSS_MSM8916
,
1674 static const struct rproc_hexagon_res msm8974_mss
= {
1675 .hexagon_mba_image
= "mba.b00",
1676 .proxy_supply
= (struct qcom_mss_reg_res
[]) {
1691 .active_supply
= (struct qcom_mss_reg_res
[]) {
1699 .proxy_clk_names
= (char*[]){
1703 .active_clk_names
= (char*[]){
1709 .need_mem_protection
= false,
1710 .has_alt_reset
= false,
1711 .version
= MSS_MSM8974
,
1714 static const struct of_device_id q6v5_of_match
[] = {
1715 { .compatible
= "qcom,q6v5-pil", .data
= &msm8916_mss
},
1716 { .compatible
= "qcom,msm8916-mss-pil", .data
= &msm8916_mss
},
1717 { .compatible
= "qcom,msm8974-mss-pil", .data
= &msm8974_mss
},
1718 { .compatible
= "qcom,msm8996-mss-pil", .data
= &msm8996_mss
},
1719 { .compatible
= "qcom,msm8998-mss-pil", .data
= &msm8998_mss
},
1720 { .compatible
= "qcom,sdm845-mss-pil", .data
= &sdm845_mss
},
1723 MODULE_DEVICE_TABLE(of
, q6v5_of_match
);
1725 static struct platform_driver q6v5_driver
= {
1726 .probe
= q6v5_probe
,
1727 .remove
= q6v5_remove
,
1729 .name
= "qcom-q6v5-mss",
1730 .of_match_table
= q6v5_of_match
,
1733 module_platform_driver(q6v5_driver
);
1735 MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
1736 MODULE_LICENSE("GPL v2");