1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
3 * Copyright (C) 2015 Linaro Ltd.
*/
5 #include <linux/platform_device.h>
6 #include <linux/init.h>
7 #include <linux/cpumask.h>
8 #include <linux/export.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/module.h>
11 #include <linux/types.h>
12 #include <linux/qcom_scm.h>
14 #include <linux/of_address.h>
15 #include <linux/of_platform.h>
16 #include <linux/clk.h>
17 #include <linux/reset-controller.h>
18 #include <linux/arm-smccc.h>
22 static bool download_mode
= IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT
);
23 module_param(download_mode
, bool, 0);
25 #define SCM_HAS_CORE_CLK BIT(0)
26 #define SCM_HAS_IFACE_CLK BIT(1)
27 #define SCM_HAS_BUS_CLK BIT(2)
32 struct clk
*iface_clk
;
34 struct reset_controller_dev reset
;
39 struct qcom_scm_current_perm_info
{
47 struct qcom_scm_mem_map_info
{
52 #define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00
53 #define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01
54 #define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08
55 #define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20
57 #define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04
58 #define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02
59 #define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10
60 #define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40
62 struct qcom_scm_wb_entry
{
67 static struct qcom_scm_wb_entry qcom_scm_wb
[] = {
68 { .flag
= QCOM_SCM_FLAG_WARMBOOT_CPU0
},
69 { .flag
= QCOM_SCM_FLAG_WARMBOOT_CPU1
},
70 { .flag
= QCOM_SCM_FLAG_WARMBOOT_CPU2
},
71 { .flag
= QCOM_SCM_FLAG_WARMBOOT_CPU3
},
74 static const char *qcom_scm_convention_names
[] = {
75 [SMC_CONVENTION_UNKNOWN
] = "unknown",
76 [SMC_CONVENTION_ARM_32
] = "smc arm 32",
77 [SMC_CONVENTION_ARM_64
] = "smc arm 64",
78 [SMC_CONVENTION_LEGACY
] = "smc legacy",
81 static struct qcom_scm
*__scm
;
83 static int qcom_scm_clk_enable(void)
87 ret
= clk_prepare_enable(__scm
->core_clk
);
91 ret
= clk_prepare_enable(__scm
->iface_clk
);
95 ret
= clk_prepare_enable(__scm
->bus_clk
);
102 clk_disable_unprepare(__scm
->iface_clk
);
104 clk_disable_unprepare(__scm
->core_clk
);
109 static void qcom_scm_clk_disable(void)
111 clk_disable_unprepare(__scm
->core_clk
);
112 clk_disable_unprepare(__scm
->iface_clk
);
113 clk_disable_unprepare(__scm
->bus_clk
);
116 static int __qcom_scm_is_call_available(struct device
*dev
, u32 svc_id
,
119 enum qcom_scm_convention qcom_scm_convention
;
120 static bool has_queried __read_mostly
;
121 static DEFINE_SPINLOCK(query_lock
);
123 static void __query_convention(void)
126 struct qcom_scm_desc desc
= {
127 .svc
= QCOM_SCM_SVC_INFO
,
128 .cmd
= QCOM_SCM_INFO_IS_CALL_AVAIL
,
129 .args
[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO
,
130 QCOM_SCM_INFO_IS_CALL_AVAIL
) |
131 (ARM_SMCCC_OWNER_SIP
<< ARM_SMCCC_OWNER_SHIFT
),
132 .arginfo
= QCOM_SCM_ARGS(1),
133 .owner
= ARM_SMCCC_OWNER_SIP
,
135 struct qcom_scm_res res
;
138 spin_lock_irqsave(&query_lock
, flags
);
142 qcom_scm_convention
= SMC_CONVENTION_ARM_64
;
143 // Device isn't required as there is only one argument - no device
144 // needed to dma_map_single to secure world
145 ret
= scm_smc_call(NULL
, &desc
, &res
, true);
146 if (!ret
&& res
.result
[0] == 1)
149 qcom_scm_convention
= SMC_CONVENTION_ARM_32
;
150 ret
= scm_smc_call(NULL
, &desc
, &res
, true);
151 if (!ret
&& res
.result
[0] == 1)
154 qcom_scm_convention
= SMC_CONVENTION_LEGACY
;
157 spin_unlock_irqrestore(&query_lock
, flags
);
158 pr_info("qcom_scm: convention: %s\n",
159 qcom_scm_convention_names
[qcom_scm_convention
]);
162 static inline enum qcom_scm_convention
__get_convention(void)
164 if (unlikely(!has_queried
))
165 __query_convention();
166 return qcom_scm_convention
;
170 * qcom_scm_call() - Invoke a syscall in the secure world
172 * @svc_id: service identifier
173 * @cmd_id: command identifier
174 * @desc: Descriptor structure containing arguments and return values
176 * Sends a command to the SCM and waits for the command to finish processing.
177 * This should *only* be called in pre-emptible context.
179 static int qcom_scm_call(struct device
*dev
, const struct qcom_scm_desc
*desc
,
180 struct qcom_scm_res
*res
)
183 switch (__get_convention()) {
184 case SMC_CONVENTION_ARM_32
:
185 case SMC_CONVENTION_ARM_64
:
186 return scm_smc_call(dev
, desc
, res
, false);
187 case SMC_CONVENTION_LEGACY
:
188 return scm_legacy_call(dev
, desc
, res
);
190 pr_err("Unknown current SCM calling convention.\n");
196 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
198 * @svc_id: service identifier
199 * @cmd_id: command identifier
200 * @desc: Descriptor structure containing arguments and return values
201 * @res: Structure containing results from SMC/HVC call
203 * Sends a command to the SCM and waits for the command to finish processing.
204 * This can be called in atomic context.
206 static int qcom_scm_call_atomic(struct device
*dev
,
207 const struct qcom_scm_desc
*desc
,
208 struct qcom_scm_res
*res
)
210 switch (__get_convention()) {
211 case SMC_CONVENTION_ARM_32
:
212 case SMC_CONVENTION_ARM_64
:
213 return scm_smc_call(dev
, desc
, res
, true);
214 case SMC_CONVENTION_LEGACY
:
215 return scm_legacy_call_atomic(dev
, desc
, res
);
217 pr_err("Unknown current SCM calling convention.\n");
222 static int __qcom_scm_is_call_available(struct device
*dev
, u32 svc_id
,
226 struct qcom_scm_desc desc
= {
227 .svc
= QCOM_SCM_SVC_INFO
,
228 .cmd
= QCOM_SCM_INFO_IS_CALL_AVAIL
,
229 .owner
= ARM_SMCCC_OWNER_SIP
,
231 struct qcom_scm_res res
;
233 desc
.arginfo
= QCOM_SCM_ARGS(1);
234 switch (__get_convention()) {
235 case SMC_CONVENTION_ARM_32
:
236 case SMC_CONVENTION_ARM_64
:
237 desc
.args
[0] = SCM_SMC_FNID(svc_id
, cmd_id
) |
238 (ARM_SMCCC_OWNER_SIP
<< ARM_SMCCC_OWNER_SHIFT
);
240 case SMC_CONVENTION_LEGACY
:
241 desc
.args
[0] = SCM_LEGACY_FNID(svc_id
, cmd_id
);
244 pr_err("Unknown SMC convention being used\n");
248 ret
= qcom_scm_call(dev
, &desc
, &res
);
250 return ret
? : res
.result
[0];
254 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
255 * @entry: Entry point function for the cpus
256 * @cpus: The cpumask of cpus that will use the entry point
258 * Set the Linux entry point for the SCM to transfer control to when coming
259 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
261 int qcom_scm_set_warm_boot_addr(void *entry
, const cpumask_t
*cpus
)
266 struct qcom_scm_desc desc
= {
267 .svc
= QCOM_SCM_SVC_BOOT
,
268 .cmd
= QCOM_SCM_BOOT_SET_ADDR
,
269 .arginfo
= QCOM_SCM_ARGS(2),
273 * Reassign only if we are switching from hotplug entry point
274 * to cpuidle entry point or vice versa.
276 for_each_cpu(cpu
, cpus
) {
277 if (entry
== qcom_scm_wb
[cpu
].entry
)
279 flags
|= qcom_scm_wb
[cpu
].flag
;
282 /* No change in entry function */
286 desc
.args
[0] = flags
;
287 desc
.args
[1] = virt_to_phys(entry
);
289 ret
= qcom_scm_call(__scm
->dev
, &desc
, NULL
);
291 for_each_cpu(cpu
, cpus
)
292 qcom_scm_wb
[cpu
].entry
= entry
;
297 EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr
);
300 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
301 * @entry: Entry point function for the cpus
302 * @cpus: The cpumask of cpus that will use the entry point
304 * Set the cold boot address of the cpus. Any cpu outside the supported
305 * range would be removed from the cpu present mask.
307 int qcom_scm_set_cold_boot_addr(void *entry
, const cpumask_t
*cpus
)
311 int scm_cb_flags
[] = {
312 QCOM_SCM_FLAG_COLDBOOT_CPU0
,
313 QCOM_SCM_FLAG_COLDBOOT_CPU1
,
314 QCOM_SCM_FLAG_COLDBOOT_CPU2
,
315 QCOM_SCM_FLAG_COLDBOOT_CPU3
,
317 struct qcom_scm_desc desc
= {
318 .svc
= QCOM_SCM_SVC_BOOT
,
319 .cmd
= QCOM_SCM_BOOT_SET_ADDR
,
320 .arginfo
= QCOM_SCM_ARGS(2),
321 .owner
= ARM_SMCCC_OWNER_SIP
,
324 if (!cpus
|| (cpus
&& cpumask_empty(cpus
)))
327 for_each_cpu(cpu
, cpus
) {
328 if (cpu
< ARRAY_SIZE(scm_cb_flags
))
329 flags
|= scm_cb_flags
[cpu
];
331 set_cpu_present(cpu
, false);
334 desc
.args
[0] = flags
;
335 desc
.args
[1] = virt_to_phys(entry
);
337 return qcom_scm_call_atomic(__scm
? __scm
->dev
: NULL
, &desc
, NULL
);
339 EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr
);
342 * qcom_scm_cpu_power_down() - Power down the cpu
343 * @flags - Flags to flush cache
345 * This is an end point to power down cpu. If there was a pending interrupt,
346 * the control would return from this function, otherwise, the cpu jumps to the
347 * warm boot entry point set for this cpu upon reset.
349 void qcom_scm_cpu_power_down(u32 flags
)
351 struct qcom_scm_desc desc
= {
352 .svc
= QCOM_SCM_SVC_BOOT
,
353 .cmd
= QCOM_SCM_BOOT_TERMINATE_PC
,
354 .args
[0] = flags
& QCOM_SCM_FLUSH_FLAG_MASK
,
355 .arginfo
= QCOM_SCM_ARGS(1),
356 .owner
= ARM_SMCCC_OWNER_SIP
,
359 qcom_scm_call_atomic(__scm
? __scm
->dev
: NULL
, &desc
, NULL
);
361 EXPORT_SYMBOL(qcom_scm_cpu_power_down
);
363 int qcom_scm_set_remote_state(u32 state
, u32 id
)
365 struct qcom_scm_desc desc
= {
366 .svc
= QCOM_SCM_SVC_BOOT
,
367 .cmd
= QCOM_SCM_BOOT_SET_REMOTE_STATE
,
368 .arginfo
= QCOM_SCM_ARGS(2),
371 .owner
= ARM_SMCCC_OWNER_SIP
,
373 struct qcom_scm_res res
;
376 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
378 return ret
? : res
.result
[0];
380 EXPORT_SYMBOL(qcom_scm_set_remote_state
);
382 static int __qcom_scm_set_dload_mode(struct device
*dev
, bool enable
)
384 struct qcom_scm_desc desc
= {
385 .svc
= QCOM_SCM_SVC_BOOT
,
386 .cmd
= QCOM_SCM_BOOT_SET_DLOAD_MODE
,
387 .arginfo
= QCOM_SCM_ARGS(2),
388 .args
[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE
,
389 .owner
= ARM_SMCCC_OWNER_SIP
,
392 desc
.args
[1] = enable
? QCOM_SCM_BOOT_SET_DLOAD_MODE
: 0;
394 return qcom_scm_call(__scm
->dev
, &desc
, NULL
);
397 static void qcom_scm_set_download_mode(bool enable
)
402 avail
= __qcom_scm_is_call_available(__scm
->dev
,
404 QCOM_SCM_BOOT_SET_DLOAD_MODE
);
406 ret
= __qcom_scm_set_dload_mode(__scm
->dev
, enable
);
407 } else if (__scm
->dload_mode_addr
) {
408 ret
= qcom_scm_io_writel(__scm
->dload_mode_addr
,
409 enable
? QCOM_SCM_BOOT_SET_DLOAD_MODE
: 0);
412 "No available mechanism for setting download mode\n");
416 dev_err(__scm
->dev
, "failed to set download mode: %d\n", ret
);
420 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
421 * state machine for a given peripheral, using the
423 * @peripheral: peripheral id
424 * @metadata: pointer to memory containing ELF header, program header table
425 * and optional blob of data used for authenticating the metadata
426 * and the rest of the firmware
427 * @size: size of the metadata
429 * Returns 0 on success.
431 int qcom_scm_pas_init_image(u32 peripheral
, const void *metadata
, size_t size
)
433 dma_addr_t mdata_phys
;
436 struct qcom_scm_desc desc
= {
437 .svc
= QCOM_SCM_SVC_PIL
,
438 .cmd
= QCOM_SCM_PIL_PAS_INIT_IMAGE
,
439 .arginfo
= QCOM_SCM_ARGS(2, QCOM_SCM_VAL
, QCOM_SCM_RW
),
440 .args
[0] = peripheral
,
441 .owner
= ARM_SMCCC_OWNER_SIP
,
443 struct qcom_scm_res res
;
446 * During the scm call memory protection will be enabled for the meta
447 * data blob, so make sure it's physically contiguous, 4K aligned and
448 * non-cachable to avoid XPU violations.
450 mdata_buf
= dma_alloc_coherent(__scm
->dev
, size
, &mdata_phys
,
453 dev_err(__scm
->dev
, "Allocation of metadata buffer failed.\n");
456 memcpy(mdata_buf
, metadata
, size
);
458 ret
= qcom_scm_clk_enable();
462 desc
.args
[1] = mdata_phys
;
464 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
466 qcom_scm_clk_disable();
469 dma_free_coherent(__scm
->dev
, size
, mdata_buf
, mdata_phys
);
471 return ret
? : res
.result
[0];
473 EXPORT_SYMBOL(qcom_scm_pas_init_image
);
476 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
477 * for firmware loading
478 * @peripheral: peripheral id
479 * @addr: start address of memory area to prepare
480 * @size: size of the memory area to prepare
482 * Returns 0 on success.
484 int qcom_scm_pas_mem_setup(u32 peripheral
, phys_addr_t addr
, phys_addr_t size
)
487 struct qcom_scm_desc desc
= {
488 .svc
= QCOM_SCM_SVC_PIL
,
489 .cmd
= QCOM_SCM_PIL_PAS_MEM_SETUP
,
490 .arginfo
= QCOM_SCM_ARGS(3),
491 .args
[0] = peripheral
,
494 .owner
= ARM_SMCCC_OWNER_SIP
,
496 struct qcom_scm_res res
;
498 ret
= qcom_scm_clk_enable();
502 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
503 qcom_scm_clk_disable();
505 return ret
? : res
.result
[0];
507 EXPORT_SYMBOL(qcom_scm_pas_mem_setup
);
510 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
511 * and reset the remote processor
512 * @peripheral: peripheral id
514 * Return 0 on success.
516 int qcom_scm_pas_auth_and_reset(u32 peripheral
)
519 struct qcom_scm_desc desc
= {
520 .svc
= QCOM_SCM_SVC_PIL
,
521 .cmd
= QCOM_SCM_PIL_PAS_AUTH_AND_RESET
,
522 .arginfo
= QCOM_SCM_ARGS(1),
523 .args
[0] = peripheral
,
524 .owner
= ARM_SMCCC_OWNER_SIP
,
526 struct qcom_scm_res res
;
528 ret
= qcom_scm_clk_enable();
532 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
533 qcom_scm_clk_disable();
535 return ret
? : res
.result
[0];
537 EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset
);
540 * qcom_scm_pas_shutdown() - Shut down the remote processor
541 * @peripheral: peripheral id
543 * Returns 0 on success.
545 int qcom_scm_pas_shutdown(u32 peripheral
)
548 struct qcom_scm_desc desc
= {
549 .svc
= QCOM_SCM_SVC_PIL
,
550 .cmd
= QCOM_SCM_PIL_PAS_SHUTDOWN
,
551 .arginfo
= QCOM_SCM_ARGS(1),
552 .args
[0] = peripheral
,
553 .owner
= ARM_SMCCC_OWNER_SIP
,
555 struct qcom_scm_res res
;
557 ret
= qcom_scm_clk_enable();
561 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
563 qcom_scm_clk_disable();
565 return ret
? : res
.result
[0];
567 EXPORT_SYMBOL(qcom_scm_pas_shutdown
);
570 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
571 * available for the given peripherial
572 * @peripheral: peripheral id
574 * Returns true if PAS is supported for this peripheral, otherwise false.
576 bool qcom_scm_pas_supported(u32 peripheral
)
579 struct qcom_scm_desc desc
= {
580 .svc
= QCOM_SCM_SVC_PIL
,
581 .cmd
= QCOM_SCM_PIL_PAS_IS_SUPPORTED
,
582 .arginfo
= QCOM_SCM_ARGS(1),
583 .args
[0] = peripheral
,
584 .owner
= ARM_SMCCC_OWNER_SIP
,
586 struct qcom_scm_res res
;
588 ret
= __qcom_scm_is_call_available(__scm
->dev
, QCOM_SCM_SVC_PIL
,
589 QCOM_SCM_PIL_PAS_IS_SUPPORTED
);
593 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
595 return ret
? false : !!res
.result
[0];
597 EXPORT_SYMBOL(qcom_scm_pas_supported
);
599 static int __qcom_scm_pas_mss_reset(struct device
*dev
, bool reset
)
601 struct qcom_scm_desc desc
= {
602 .svc
= QCOM_SCM_SVC_PIL
,
603 .cmd
= QCOM_SCM_PIL_PAS_MSS_RESET
,
604 .arginfo
= QCOM_SCM_ARGS(2),
607 .owner
= ARM_SMCCC_OWNER_SIP
,
609 struct qcom_scm_res res
;
612 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
614 return ret
? : res
.result
[0];
617 static int qcom_scm_pas_reset_assert(struct reset_controller_dev
*rcdev
,
623 return __qcom_scm_pas_mss_reset(__scm
->dev
, 1);
626 static int qcom_scm_pas_reset_deassert(struct reset_controller_dev
*rcdev
,
632 return __qcom_scm_pas_mss_reset(__scm
->dev
, 0);
635 static const struct reset_control_ops qcom_scm_pas_reset_ops
= {
636 .assert = qcom_scm_pas_reset_assert
,
637 .deassert
= qcom_scm_pas_reset_deassert
,
640 int qcom_scm_io_readl(phys_addr_t addr
, unsigned int *val
)
642 struct qcom_scm_desc desc
= {
643 .svc
= QCOM_SCM_SVC_IO
,
644 .cmd
= QCOM_SCM_IO_READ
,
645 .arginfo
= QCOM_SCM_ARGS(1),
647 .owner
= ARM_SMCCC_OWNER_SIP
,
649 struct qcom_scm_res res
;
653 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
655 *val
= res
.result
[0];
657 return ret
< 0 ? ret
: 0;
659 EXPORT_SYMBOL(qcom_scm_io_readl
);
661 int qcom_scm_io_writel(phys_addr_t addr
, unsigned int val
)
663 struct qcom_scm_desc desc
= {
664 .svc
= QCOM_SCM_SVC_IO
,
665 .cmd
= QCOM_SCM_IO_WRITE
,
666 .arginfo
= QCOM_SCM_ARGS(2),
669 .owner
= ARM_SMCCC_OWNER_SIP
,
673 return qcom_scm_call(__scm
->dev
, &desc
, NULL
);
675 EXPORT_SYMBOL(qcom_scm_io_writel
);
678 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
679 * supports restore security config interface.
681 * Return true if restore-cfg interface is supported, false if not.
683 bool qcom_scm_restore_sec_cfg_available(void)
685 return __qcom_scm_is_call_available(__scm
->dev
, QCOM_SCM_SVC_MP
,
686 QCOM_SCM_MP_RESTORE_SEC_CFG
);
688 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available
);
690 int qcom_scm_restore_sec_cfg(u32 device_id
, u32 spare
)
692 struct qcom_scm_desc desc
= {
693 .svc
= QCOM_SCM_SVC_MP
,
694 .cmd
= QCOM_SCM_MP_RESTORE_SEC_CFG
,
695 .arginfo
= QCOM_SCM_ARGS(2),
696 .args
[0] = device_id
,
698 .owner
= ARM_SMCCC_OWNER_SIP
,
700 struct qcom_scm_res res
;
703 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
705 return ret
? : res
.result
[0];
707 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg
);
709 int qcom_scm_iommu_secure_ptbl_size(u32 spare
, size_t *size
)
711 struct qcom_scm_desc desc
= {
712 .svc
= QCOM_SCM_SVC_MP
,
713 .cmd
= QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE
,
714 .arginfo
= QCOM_SCM_ARGS(1),
716 .owner
= ARM_SMCCC_OWNER_SIP
,
718 struct qcom_scm_res res
;
721 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
724 *size
= res
.result
[0];
726 return ret
? : res
.result
[1];
728 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size
);
730 int qcom_scm_iommu_secure_ptbl_init(u64 addr
, u32 size
, u32 spare
)
732 struct qcom_scm_desc desc
= {
733 .svc
= QCOM_SCM_SVC_MP
,
734 .cmd
= QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT
,
735 .arginfo
= QCOM_SCM_ARGS(3, QCOM_SCM_RW
, QCOM_SCM_VAL
,
740 .owner
= ARM_SMCCC_OWNER_SIP
,
746 desc
.args
[2] = spare
;
747 desc
.arginfo
= QCOM_SCM_ARGS(3, QCOM_SCM_RW
, QCOM_SCM_VAL
,
750 ret
= qcom_scm_call(__scm
->dev
, &desc
, NULL
);
752 /* the pg table has been initialized already, ignore the error */
758 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init
);
760 static int __qcom_scm_assign_mem(struct device
*dev
, phys_addr_t mem_region
,
761 size_t mem_sz
, phys_addr_t src
, size_t src_sz
,
762 phys_addr_t dest
, size_t dest_sz
)
765 struct qcom_scm_desc desc
= {
766 .svc
= QCOM_SCM_SVC_MP
,
767 .cmd
= QCOM_SCM_MP_ASSIGN
,
768 .arginfo
= QCOM_SCM_ARGS(7, QCOM_SCM_RO
, QCOM_SCM_VAL
,
769 QCOM_SCM_RO
, QCOM_SCM_VAL
, QCOM_SCM_RO
,
770 QCOM_SCM_VAL
, QCOM_SCM_VAL
),
771 .args
[0] = mem_region
,
778 .owner
= ARM_SMCCC_OWNER_SIP
,
780 struct qcom_scm_res res
;
782 ret
= qcom_scm_call(dev
, &desc
, &res
);
784 return ret
? : res
.result
[0];
788 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
789 * @mem_addr: mem region whose ownership need to be reassigned
790 * @mem_sz: size of the region.
791 * @srcvm: vmid for current set of owners, each set bit in
792 * flag indicate a unique owner
793 * @newvm: array having new owners and corresponding permission
795 * @dest_cnt: number of owners in next set.
797 * Return negative errno on failure or 0 on success with @srcvm updated.
799 int qcom_scm_assign_mem(phys_addr_t mem_addr
, size_t mem_sz
,
801 const struct qcom_scm_vmperm
*newvm
,
802 unsigned int dest_cnt
)
804 struct qcom_scm_current_perm_info
*destvm
;
805 struct qcom_scm_mem_map_info
*mem_to_map
;
806 phys_addr_t mem_to_map_phys
;
807 phys_addr_t dest_phys
;
809 size_t mem_to_map_sz
;
817 unsigned long srcvm_bits
= *srcvm
;
819 src_sz
= hweight_long(srcvm_bits
) * sizeof(*src
);
820 mem_to_map_sz
= sizeof(*mem_to_map
);
821 dest_sz
= dest_cnt
* sizeof(*destvm
);
822 ptr_sz
= ALIGN(src_sz
, SZ_64
) + ALIGN(mem_to_map_sz
, SZ_64
) +
823 ALIGN(dest_sz
, SZ_64
);
825 ptr
= dma_alloc_coherent(__scm
->dev
, ptr_sz
, &ptr_phys
, GFP_KERNEL
);
829 /* Fill source vmid detail */
832 for_each_set_bit(b
, &srcvm_bits
, BITS_PER_LONG
)
833 src
[i
++] = cpu_to_le32(b
);
835 /* Fill details of mem buff to map */
836 mem_to_map
= ptr
+ ALIGN(src_sz
, SZ_64
);
837 mem_to_map_phys
= ptr_phys
+ ALIGN(src_sz
, SZ_64
);
838 mem_to_map
->mem_addr
= cpu_to_le64(mem_addr
);
839 mem_to_map
->mem_size
= cpu_to_le64(mem_sz
);
842 /* Fill details of next vmid detail */
843 destvm
= ptr
+ ALIGN(mem_to_map_sz
, SZ_64
) + ALIGN(src_sz
, SZ_64
);
844 dest_phys
= ptr_phys
+ ALIGN(mem_to_map_sz
, SZ_64
) + ALIGN(src_sz
, SZ_64
);
845 for (i
= 0; i
< dest_cnt
; i
++, destvm
++, newvm
++) {
846 destvm
->vmid
= cpu_to_le32(newvm
->vmid
);
847 destvm
->perm
= cpu_to_le32(newvm
->perm
);
849 destvm
->ctx_size
= 0;
850 next_vm
|= BIT(newvm
->vmid
);
853 ret
= __qcom_scm_assign_mem(__scm
->dev
, mem_to_map_phys
, mem_to_map_sz
,
854 ptr_phys
, src_sz
, dest_phys
, dest_sz
);
855 dma_free_coherent(__scm
->dev
, ptr_sz
, ptr
, ptr_phys
);
858 "Assign memory protection call failed %d\n", ret
);
865 EXPORT_SYMBOL(qcom_scm_assign_mem
);
868 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
870 bool qcom_scm_ocmem_lock_available(void)
872 return __qcom_scm_is_call_available(__scm
->dev
, QCOM_SCM_SVC_OCMEM
,
873 QCOM_SCM_OCMEM_LOCK_CMD
);
875 EXPORT_SYMBOL(qcom_scm_ocmem_lock_available
);
878 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
879 * region to the specified initiator
881 * @id: tz initiator id
882 * @offset: OCMEM offset
884 * @mode: access mode (WIDE/NARROW)
886 int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id
, u32 offset
, u32 size
,
889 struct qcom_scm_desc desc
= {
890 .svc
= QCOM_SCM_SVC_OCMEM
,
891 .cmd
= QCOM_SCM_OCMEM_LOCK_CMD
,
896 .arginfo
= QCOM_SCM_ARGS(4),
899 return qcom_scm_call(__scm
->dev
, &desc
, NULL
);
901 EXPORT_SYMBOL(qcom_scm_ocmem_lock
);
904 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
905 * region from the specified initiator
907 * @id: tz initiator id
908 * @offset: OCMEM offset
911 int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id
, u32 offset
, u32 size
)
913 struct qcom_scm_desc desc
= {
914 .svc
= QCOM_SCM_SVC_OCMEM
,
915 .cmd
= QCOM_SCM_OCMEM_UNLOCK_CMD
,
919 .arginfo
= QCOM_SCM_ARGS(3),
922 return qcom_scm_call(__scm
->dev
, &desc
, NULL
);
924 EXPORT_SYMBOL(qcom_scm_ocmem_unlock
);
927 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
929 * Return true if HDCP is supported, false if not.
931 bool qcom_scm_hdcp_available(void)
933 int ret
= qcom_scm_clk_enable();
938 ret
= __qcom_scm_is_call_available(__scm
->dev
, QCOM_SCM_SVC_HDCP
,
939 QCOM_SCM_HDCP_INVOKE
);
941 qcom_scm_clk_disable();
943 return ret
> 0 ? true : false;
945 EXPORT_SYMBOL(qcom_scm_hdcp_available
);
948 * qcom_scm_hdcp_req() - Send HDCP request.
949 * @req: HDCP request array
950 * @req_cnt: HDCP request array count
951 * @resp: response buffer passed to SCM
953 * Write HDCP register(s) through SCM.
955 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req
*req
, u32 req_cnt
, u32
*resp
)
958 struct qcom_scm_desc desc
= {
959 .svc
= QCOM_SCM_SVC_HDCP
,
960 .cmd
= QCOM_SCM_HDCP_INVOKE
,
961 .arginfo
= QCOM_SCM_ARGS(10),
974 .owner
= ARM_SMCCC_OWNER_SIP
,
976 struct qcom_scm_res res
;
978 if (req_cnt
> QCOM_SCM_HDCP_MAX_REQ_CNT
)
981 ret
= qcom_scm_clk_enable();
985 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
986 *resp
= res
.result
[0];
988 qcom_scm_clk_disable();
992 EXPORT_SYMBOL(qcom_scm_hdcp_req
);
994 int qcom_scm_qsmmu500_wait_safe_toggle(bool en
)
996 struct qcom_scm_desc desc
= {
997 .svc
= QCOM_SCM_SVC_SMMU_PROGRAM
,
998 .cmd
= QCOM_SCM_SMMU_CONFIG_ERRATA1
,
999 .arginfo
= QCOM_SCM_ARGS(2),
1000 .args
[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL
,
1002 .owner
= ARM_SMCCC_OWNER_SIP
,
1006 return qcom_scm_call_atomic(__scm
->dev
, &desc
, NULL
);
1008 EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle
);
1010 static int qcom_scm_find_dload_address(struct device
*dev
, u64
*addr
)
1012 struct device_node
*tcsr
;
1013 struct device_node
*np
= dev
->of_node
;
1014 struct resource res
;
1018 tcsr
= of_parse_phandle(np
, "qcom,dload-mode", 0);
1022 ret
= of_address_to_resource(tcsr
, 0, &res
);
1027 ret
= of_property_read_u32_index(np
, "qcom,dload-mode", 1, &offset
);
1031 *addr
= res
.start
+ offset
;
1037 * qcom_scm_is_available() - Checks if SCM is available
1039 bool qcom_scm_is_available(void)
1043 EXPORT_SYMBOL(qcom_scm_is_available
);
1045 static int qcom_scm_probe(struct platform_device
*pdev
)
1047 struct qcom_scm
*scm
;
1051 scm
= devm_kzalloc(&pdev
->dev
, sizeof(*scm
), GFP_KERNEL
);
1055 ret
= qcom_scm_find_dload_address(&pdev
->dev
, &scm
->dload_mode_addr
);
1059 clks
= (unsigned long)of_device_get_match_data(&pdev
->dev
);
1061 scm
->core_clk
= devm_clk_get(&pdev
->dev
, "core");
1062 if (IS_ERR(scm
->core_clk
)) {
1063 if (PTR_ERR(scm
->core_clk
) == -EPROBE_DEFER
)
1064 return PTR_ERR(scm
->core_clk
);
1066 if (clks
& SCM_HAS_CORE_CLK
) {
1067 dev_err(&pdev
->dev
, "failed to acquire core clk\n");
1068 return PTR_ERR(scm
->core_clk
);
1071 scm
->core_clk
= NULL
;
1074 scm
->iface_clk
= devm_clk_get(&pdev
->dev
, "iface");
1075 if (IS_ERR(scm
->iface_clk
)) {
1076 if (PTR_ERR(scm
->iface_clk
) == -EPROBE_DEFER
)
1077 return PTR_ERR(scm
->iface_clk
);
1079 if (clks
& SCM_HAS_IFACE_CLK
) {
1080 dev_err(&pdev
->dev
, "failed to acquire iface clk\n");
1081 return PTR_ERR(scm
->iface_clk
);
1084 scm
->iface_clk
= NULL
;
1087 scm
->bus_clk
= devm_clk_get(&pdev
->dev
, "bus");
1088 if (IS_ERR(scm
->bus_clk
)) {
1089 if (PTR_ERR(scm
->bus_clk
) == -EPROBE_DEFER
)
1090 return PTR_ERR(scm
->bus_clk
);
1092 if (clks
& SCM_HAS_BUS_CLK
) {
1093 dev_err(&pdev
->dev
, "failed to acquire bus clk\n");
1094 return PTR_ERR(scm
->bus_clk
);
1097 scm
->bus_clk
= NULL
;
1100 scm
->reset
.ops
= &qcom_scm_pas_reset_ops
;
1101 scm
->reset
.nr_resets
= 1;
1102 scm
->reset
.of_node
= pdev
->dev
.of_node
;
1103 ret
= devm_reset_controller_register(&pdev
->dev
, &scm
->reset
);
1107 /* vote for max clk rate for highest performance */
1108 ret
= clk_set_rate(scm
->core_clk
, INT_MAX
);
1113 __scm
->dev
= &pdev
->dev
;
1115 __query_convention();
1118 * If requested enable "download mode", from this point on warmboot
1119 * will cause the the boot stages to enter download mode, unless
1120 * disabled below by a clean shutdown/reboot.
1123 qcom_scm_set_download_mode(true);
1128 static void qcom_scm_shutdown(struct platform_device
*pdev
)
1130 /* Clean shutdown, disable download mode to allow normal restart */
1132 qcom_scm_set_download_mode(false);
1135 static const struct of_device_id qcom_scm_dt_match
[] = {
1136 { .compatible
= "qcom,scm-apq8064",
1137 /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
1139 { .compatible
= "qcom,scm-apq8084", .data
= (void *)(SCM_HAS_CORE_CLK
|
1143 { .compatible
= "qcom,scm-ipq4019" },
1144 { .compatible
= "qcom,scm-msm8660", .data
= (void *) SCM_HAS_CORE_CLK
},
1145 { .compatible
= "qcom,scm-msm8960", .data
= (void *) SCM_HAS_CORE_CLK
},
1146 { .compatible
= "qcom,scm-msm8916", .data
= (void *)(SCM_HAS_CORE_CLK
|
1150 { .compatible
= "qcom,scm-msm8974", .data
= (void *)(SCM_HAS_CORE_CLK
|
1154 { .compatible
= "qcom,scm-msm8996" },
1155 { .compatible
= "qcom,scm" },
1159 static struct platform_driver qcom_scm_driver
= {
1162 .of_match_table
= qcom_scm_dt_match
,
1164 .probe
= qcom_scm_probe
,
1165 .shutdown
= qcom_scm_shutdown
,
1168 static int __init
qcom_scm_init(void)
1170 return platform_driver_register(&qcom_scm_driver
);
1172 subsys_initcall(qcom_scm_init
);