/*
 * Qualcomm Peripheral Image Loader
 *
 * Copyright (C) 2016 Linaro Ltd.
 * Copyright (C) 2014 Sony Mobile Communications AB
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>

#include "remoteproc_internal.h"
#include "qcom_mdt_loader.h"

#include <linux/qcom_scm.h>

#define MBA_FIRMWARE_NAME		"mba.b00"
#define MPSS_FIRMWARE_NAME		"modem.mdt"

#define MPSS_CRASH_REASON_SMEM		421

/* RMB Status Register Values */
#define RMB_PBL_SUCCESS			0x1

#define RMB_MBA_XPU_UNLOCKED		0x1
#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
#define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
#define RMB_MBA_AUTH_COMPLETE		0x4

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE_REG		0x00
#define RMB_PBL_STATUS_REG		0x04
#define RMB_MBA_COMMAND_REG		0x08
#define RMB_MBA_STATUS_REG		0x0C
#define RMB_PMI_META_DATA_REG		0x10
#define RMB_PMI_CODE_START_REG		0x14
#define RMB_PMI_CODE_LENGTH_REG		0x18

#define RMB_CMD_META_DATA_READY		0x1
#define RMB_CMD_LOAD_READY		0x2

/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET_REG		0x014
#define QDSP6SS_GFMUX_CTL_REG		0x020
#define QDSP6SS_PWR_CTL_REG		0x030

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8

#define HALT_ACK_TIMEOUT_MS		100
/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)

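/*
 * Regulator supply indexes and per-instance driver context for one Q6v5
 * (MSS) remote processor: Q6 and RMB register bases, the halt/reset/clock/
 * regulator resources and the carveout regions used for the MBA and MPSS
 * firmware images.
 */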
enum {
	Q6V5_SUPPLY_CX,
	Q6V5_SUPPLY_MX,
	Q6V5_SUPPLY_MSS,
	Q6V5_SUPPLY_PLL,
};

struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;
	void __iomem *rmb_base;

	struct regmap *halt_map;
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;

	struct reset_control *mss_restart;

	struct qcom_smem_state *state;
	unsigned stop_bit;

	struct regulator_bulk_data supply[4];

	struct clk *ahb_clk;
	struct clk *axi_clk;
	struct clk *rom_clk;

	struct completion start_done;
	struct completion stop_done;
	bool running;

	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;

	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	void *mpss_region;
	size_t mpss_size;
};

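/*
 * Look up the cx, mx, mss and pll supplies and program the load each
 * regulator is expected to carry while the modem subsystem is running.
 */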
static int q6v5_regulator_init(struct q6v5 *qproc)
{
	int ret;

	qproc->supply[Q6V5_SUPPLY_CX].supply = "cx";
	qproc->supply[Q6V5_SUPPLY_MX].supply = "mx";
	qproc->supply[Q6V5_SUPPLY_MSS].supply = "mss";
	qproc->supply[Q6V5_SUPPLY_PLL].supply = "pll";

	ret = devm_regulator_bulk_get(qproc->dev,
				      ARRAY_SIZE(qproc->supply), qproc->supply);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to get supplies\n");
		return ret;
	}

	regulator_set_load(qproc->supply[Q6V5_SUPPLY_CX].consumer, 100000);
	regulator_set_load(qproc->supply[Q6V5_SUPPLY_MSS].consumer, 100000);
	regulator_set_load(qproc->supply[Q6V5_SUPPLY_PLL].consumer, 10000);

	return 0;
}

static int q6v5_regulator_enable(struct q6v5 *qproc)
{
	struct regulator *mss = qproc->supply[Q6V5_SUPPLY_MSS].consumer;
	struct regulator *mx = qproc->supply[Q6V5_SUPPLY_MX].consumer;
	int ret;

	/* TODO: Q6V5_SUPPLY_CX is supposed to be set to super-turbo here */

	ret = regulator_set_voltage(mx, 1050000, INT_MAX);
	if (ret)
		return ret;

	regulator_set_voltage(mss, 1000000, 1150000);

	return regulator_bulk_enable(ARRAY_SIZE(qproc->supply), qproc->supply);
}

static void q6v5_regulator_disable(struct q6v5 *qproc)
{
	struct regulator *mss = qproc->supply[Q6V5_SUPPLY_MSS].consumer;
	struct regulator *mx = qproc->supply[Q6V5_SUPPLY_MX].consumer;

	/* TODO: Q6V5_SUPPLY_CX corner votes should be released */

	regulator_bulk_disable(ARRAY_SIZE(qproc->supply), qproc->supply);
	regulator_set_voltage(mx, 0, INT_MAX);
	regulator_set_voltage(mss, 0, 1150000);
}

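/* rproc .load operation: copy the MBA firmware image into its carveout. */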
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
	struct q6v5 *qproc = rproc->priv;

	memcpy(qproc->mba_region, fw->data, fw->size);

	return 0;
}

static const struct rproc_fw_ops q6v5_fw_ops = {
	.find_rsc_table = qcom_mdt_find_rsc_table,
	.load = q6v5_load,
};

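/*
 * Poll the RMB status registers until the PBL (or the MBA) reports a result
 * or the given timeout expires; the raw status code is returned to the
 * caller, -ETIMEDOUT on timeout.
 */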
static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
		if (val)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (val < 0)
			break;

		if (!status && val)
			break;
		else if (status && val == status)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

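/*
 * Bring the Hexagon core up: assert the QDSP6SS resets, enable the power
 * block head switch, power the memories bank by bank, remove the IO clamp,
 * release the core reset, enable the core clock and let the core run, then
 * wait for the PBL to report its boot status over the RMB.
 */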
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;

	/* Assert resets, stop core */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE);
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Enable power block headswitch, and wait for it to stabilize */
	val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= QDSS_BHS_ON | QDSS_LDO_BYP;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	udelay(1);

	/*
	 * Turn on memories. L2 banks should be done individually
	 * to minimize inrush current.
	 */
	val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
	       Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_L2DATA_SLP_NRET_N_2;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_L2DATA_SLP_NRET_N_1;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_L2DATA_SLP_NRET_N_0;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}

static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned long timeout;
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
	for (;;) {
		ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
		if (ret || val || time_after(jiffies, timeout))
			break;

		msleep(1);
	}

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}

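/*
 * Copy the MPSS firmware header (the .mdt) into a physically contiguous
 * buffer and hand its address to the MBA over the RMB registers, so the MBA
 * can authenticate the metadata before any segments are loaded.
 */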
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *ptr;
	int ret;

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, fw->data, fw->size);

	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);

	return ret < 0 ? ret : 0;
}

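/*
 * Walk the ELF program headers of the MPSS image and grow the length
 * programmed in RMB_PMI_CODE_LENGTH_REG as each loadable segment becomes
 * available, then wait for the MBA to report that authentication of the
 * whole image has completed.
 */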
static int q6v5_mpss_validate(struct q6v5 *qproc, const struct firmware *fw)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	struct elf32_hdr *ehdr;
	phys_addr_t boot_addr;
	phys_addr_t fw_addr;
	bool relocate;
	size_t size;
	int ret;
	int i;

	ret = qcom_mdt_parse(fw, &fw_addr, NULL, &relocate);
	if (ret) {
		dev_err(qproc->dev, "failed to parse mdt header\n");
		return ret;
	}

	if (relocate)
		boot_addr = qproc->mpss_phys;
	else
		boot_addr = fw_addr;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);
	for (i = 0, phdr = phdrs; i < ehdr->e_phnum; i++, phdr++) {
		if (phdr->p_type != PT_LOAD)
			continue;

		if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
			continue;

		size = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
		if (!size) {
			writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
			writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
		}

		size += phdr->p_memsz;
		writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
	}

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

	return ret < 0 ? ret : 0;
}

static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct firmware *fw;
	phys_addr_t fw_addr;
	bool relocate;
	int ret;

	ret = request_firmware(&fw, MPSS_FIRMWARE_NAME, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load " MPSS_FIRMWARE_NAME "\n");
		return ret;
	}

	ret = qcom_mdt_parse(fw, &fw_addr, NULL, &relocate);
	if (ret) {
		dev_err(qproc->dev, "failed to parse mdt header\n");
		goto release_firmware;
	}

	if (relocate)
		qproc->mpss_reloc = fw_addr;

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ret = qcom_mdt_load(qproc->rproc, fw, MPSS_FIRMWARE_NAME);
	if (ret)
		goto release_firmware;

	ret = q6v5_mpss_validate(qproc, fw);

release_firmware:
	release_firmware(fw);

	return ret < 0 ? ret : 0;
}

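/*
 * Boot the modem subsystem: power the supplies, release the MSS reset,
 * enable the clocks, hand the MBA carveout address to the PBL, bring the Q6
 * out of reset and, once the MBA is up, load and authenticate the MPSS
 * firmware before waiting for the handover interrupt.
 */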
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	ret = q6v5_regulator_enable(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		return ret;
	}

	ret = reset_control_deassert(qproc->mss_restart);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_vdd;
	}

	ret = clk_prepare_enable(qproc->ahb_clk);
	if (ret)
		goto assert_reset;

	ret = clk_prepare_enable(qproc->axi_clk);
	if (ret)
		goto disable_ahb_clk;

	ret = clk_prepare_enable(qproc->rom_clk);
	if (ret)
		goto disable_axi_clk;

	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto halt_axi_ports;

	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto halt_axi_ports;

	ret = wait_for_completion_timeout(&qproc->start_done,
					  msecs_to_jiffies(5000));
	if (ret == 0) {
		dev_err(qproc->dev, "start timed out\n");
		ret = -ETIMEDOUT;
		goto halt_axi_ports;
	}

	qproc->running = true;

	/* TODO: All done, release the handover resources */

	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

	clk_disable_unprepare(qproc->rom_clk);
disable_axi_clk:
	clk_disable_unprepare(qproc->axi_clk);
disable_ahb_clk:
	clk_disable_unprepare(qproc->ahb_clk);
assert_reset:
	reset_control_assert(qproc->mss_restart);
disable_vdd:
	q6v5_regulator_disable(qproc);

	return ret;
}

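/*
 * Shut the modem down: request a stop via the smem state "stop" bit, wait
 * for the stop acknowledgment, then halt the AXI ports, assert the MSS
 * reset and disable the clocks and supplies.
 */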
static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	qproc->running = false;

	qcom_smem_state_update_bits(qproc->state,
				    BIT(qproc->stop_bit), BIT(qproc->stop_bit));

	ret = wait_for_completion_timeout(&qproc->stop_done,
					  msecs_to_jiffies(5000));
	if (ret == 0)
		dev_err(qproc->dev, "timed out on wait\n");

	qcom_smem_state_update_bits(qproc->state, BIT(qproc->stop_bit), 0);

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

	reset_control_assert(qproc->mss_restart);
	clk_disable_unprepare(qproc->rom_clk);
	clk_disable_unprepare(qproc->axi_clk);
	clk_disable_unprepare(qproc->ahb_clk);
	q6v5_regulator_disable(qproc);

	return 0;
}

static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
{
	struct q6v5 *qproc = rproc->priv;
	int offset;

	offset = da - qproc->mpss_reloc;
	if (offset < 0 || offset + len > qproc->mpss_size)
		return NULL;

	return qproc->mpss_region + offset;
}

static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
};

static irqreturn_t q6v5_wdog_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;
	size_t len;
	char *msg;

	/* Sometimes the stop triggers a watchdog rather than a stop-ack */
	if (!qproc->running) {
		complete(&qproc->stop_done);
		return IRQ_HANDLED;
	}

	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
	if (!IS_ERR(msg) && len > 0 && msg[0])
		dev_err(qproc->dev, "watchdog received: %s\n", msg);
	else
		dev_err(qproc->dev, "watchdog without message\n");

	rproc_report_crash(qproc->rproc, RPROC_WATCHDOG);

	return IRQ_HANDLED;
}

static irqreturn_t q6v5_fatal_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;
	size_t len;
	char *msg;

	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
	if (!IS_ERR(msg) && len > 0 && msg[0])
		dev_err(qproc->dev, "fatal error received: %s\n", msg);
	else
		dev_err(qproc->dev, "fatal error without message\n");

	rproc_report_crash(qproc->rproc, RPROC_FATAL_ERROR);

	return IRQ_HANDLED;
}

static irqreturn_t q6v5_handover_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;

	complete(&qproc->start_done);

	return IRQ_HANDLED;
}

static irqreturn_t q6v5_stop_ack_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;

	complete(&qproc->stop_done);

	return IRQ_HANDLED;
}

static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	return 0;
}

static int q6v5_init_clocks(struct q6v5 *qproc)
{
	qproc->ahb_clk = devm_clk_get(qproc->dev, "iface");
	if (IS_ERR(qproc->ahb_clk)) {
		dev_err(qproc->dev, "failed to get iface clock\n");
		return PTR_ERR(qproc->ahb_clk);
	}

	qproc->axi_clk = devm_clk_get(qproc->dev, "bus");
	if (IS_ERR(qproc->axi_clk)) {
		dev_err(qproc->dev, "failed to get bus clock\n");
		return PTR_ERR(qproc->axi_clk);
	}

	qproc->rom_clk = devm_clk_get(qproc->dev, "mem");
	if (IS_ERR(qproc->rom_clk)) {
		dev_err(qproc->dev, "failed to get mem clock\n");
		return PTR_ERR(qproc->rom_clk);
	}

	return 0;
}

static int q6v5_init_reset(struct q6v5 *qproc)
{
	qproc->mss_restart = devm_reset_control_get(qproc->dev, NULL);
	if (IS_ERR(qproc->mss_restart)) {
		dev_err(qproc->dev, "failed to acquire mss restart\n");
		return PTR_ERR(qproc->mss_restart);
	}

	return 0;
}

static int q6v5_request_irq(struct q6v5 *qproc,
			    struct platform_device *pdev,
			    const char *name,
			    irq_handler_t thread_fn)
{
	int ret;

	ret = platform_get_irq_byname(pdev, name);
	if (ret < 0) {
		dev_err(&pdev->dev, "no %s IRQ defined\n", name);
		return ret;
	}

	ret = devm_request_threaded_irq(&pdev->dev, ret,
					NULL, thread_fn,
					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					"q6v5", qproc);
	if (ret)
		dev_err(&pdev->dev, "request %s IRQ failed\n", name);

	return ret;
}

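/*
 * Resolve the "mba" and "mpss" reserved-memory regions from the device tree
 * and map them, so the firmware images can be copied into place.
 */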
static int q6v5_alloc_memory_region(struct q6v5 *qproc)
{
	struct device_node *child;
	struct device_node *node;
	struct resource r;
	int ret;

	child = of_get_child_by_name(qproc->dev->of_node, "mba");
	node = of_parse_phandle(child, "memory-region", 0);
	ret = of_address_to_resource(node, 0, &r);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mba region\n");
		return ret;
	}

	qproc->mba_phys = r.start;
	qproc->mba_size = resource_size(&r);
	qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
	if (!qproc->mba_region) {
		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
			&r.start, qproc->mba_size);
		return -EBUSY;
	}

	child = of_get_child_by_name(qproc->dev->of_node, "mpss");
	node = of_parse_phandle(child, "memory-region", 0);
	ret = of_address_to_resource(node, 0, &r);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mpss region\n");
		return ret;
	}

	qproc->mpss_phys = qproc->mpss_reloc = r.start;
	qproc->mpss_size = resource_size(&r);
	qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
	if (!qproc->mpss_region) {
		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
			&r.start, qproc->mpss_size);
		return -EBUSY;
	}

	return 0;
}

static int q6v5_probe(struct platform_device *pdev)
{
	struct q6v5 *qproc;
	struct rproc *rproc;
	int ret;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
			    MBA_FIRMWARE_NAME, sizeof(*qproc));
	if (!rproc) {
		dev_err(&pdev->dev, "failed to allocate rproc\n");
		return -ENOMEM;
	}

	rproc->fw_ops = &q6v5_fw_ops;

	qproc = (struct q6v5 *)rproc->priv;
	qproc->dev = &pdev->dev;
	qproc->rproc = rproc;
	platform_set_drvdata(pdev, qproc);

	init_completion(&qproc->start_done);
	init_completion(&qproc->stop_done);

	ret = q6v5_init_mem(qproc, pdev);
	if (ret)
		goto free_rproc;

	ret = q6v5_alloc_memory_region(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_init_clocks(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_regulator_init(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_init_reset(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "wdog", q6v5_wdog_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "fatal", q6v5_fatal_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "handover", q6v5_handover_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "stop-ack", q6v5_stop_ack_interrupt);
	if (ret < 0)
		goto free_rproc;

	qproc->state = qcom_smem_state_get(&pdev->dev, "stop", &qproc->stop_bit);
	if (IS_ERR(qproc->state)) {
		ret = PTR_ERR(qproc->state);
		goto free_rproc;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto free_rproc;

	return 0;

free_rproc:
	rproc_free(rproc);

	return ret;
}

static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);

	rproc_del(qproc->rproc);
	rproc_free(qproc->rproc);

	return 0;
}

static const struct of_device_id q6v5_of_match[] = {
	{ .compatible = "qcom,q6v5-pil", },
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);

static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-pil",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Peripheral Image Loader for Hexagon");
MODULE_LICENSE("GPL v2");