// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
#include <linux/arm-smccc.h>

#include "qcom_scm.h"
23 static bool download_mode
= IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT
);
24 module_param(download_mode
, bool, 0);
/*
 * Flags carried in the of_device_id .data word: which clocks the SCM
 * interface needs on a given SoC (consumed in qcom_scm_probe()).
 */
#define SCM_HAS_CORE_CLK	BIT(0)
#define SCM_HAS_IFACE_CLK	BIT(1)
#define SCM_HAS_BUS_CLK		BIT(2)
33 struct clk
*iface_clk
;
35 struct reset_controller_dev reset
;
40 struct qcom_scm_current_perm_info
{
48 struct qcom_scm_mem_map_info
{
/* Per-CPU selectors for the cold boot address call (see scm_cb_flags[]). */
#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20

/* Per-CPU selectors for the warm boot address call (see qcom_scm_wb[]). */
#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40
/*
 * Per-CPU warm boot bookkeeping: the firmware flag bit for the CPU and the
 * entry point last programmed for it (both used in
 * qcom_scm_set_warm_boot_addr()).
 */
struct qcom_scm_wb_entry {
	int flag;
	void *entry;
};
68 static struct qcom_scm_wb_entry qcom_scm_wb
[] = {
69 { .flag
= QCOM_SCM_FLAG_WARMBOOT_CPU0
},
70 { .flag
= QCOM_SCM_FLAG_WARMBOOT_CPU1
},
71 { .flag
= QCOM_SCM_FLAG_WARMBOOT_CPU2
},
72 { .flag
= QCOM_SCM_FLAG_WARMBOOT_CPU3
},
75 static const char *qcom_scm_convention_names
[] = {
76 [SMC_CONVENTION_UNKNOWN
] = "unknown",
77 [SMC_CONVENTION_ARM_32
] = "smc arm 32",
78 [SMC_CONVENTION_ARM_64
] = "smc arm 64",
79 [SMC_CONVENTION_LEGACY
] = "smc legacy",
/* Singleton driver state, set at the end of qcom_scm_probe(). */
static struct qcom_scm *__scm;
84 static int qcom_scm_clk_enable(void)
88 ret
= clk_prepare_enable(__scm
->core_clk
);
92 ret
= clk_prepare_enable(__scm
->iface_clk
);
96 ret
= clk_prepare_enable(__scm
->bus_clk
);
103 clk_disable_unprepare(__scm
->iface_clk
);
105 clk_disable_unprepare(__scm
->core_clk
);
110 static void qcom_scm_clk_disable(void)
112 clk_disable_unprepare(__scm
->core_clk
);
113 clk_disable_unprepare(__scm
->iface_clk
);
114 clk_disable_unprepare(__scm
->bus_clk
);
117 static int __qcom_scm_is_call_available(struct device
*dev
, u32 svc_id
,
120 enum qcom_scm_convention qcom_scm_convention
;
121 static bool has_queried __read_mostly
;
122 static DEFINE_SPINLOCK(query_lock
);
124 static void __query_convention(void)
127 struct qcom_scm_desc desc
= {
128 .svc
= QCOM_SCM_SVC_INFO
,
129 .cmd
= QCOM_SCM_INFO_IS_CALL_AVAIL
,
130 .args
[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO
,
131 QCOM_SCM_INFO_IS_CALL_AVAIL
) |
132 (ARM_SMCCC_OWNER_SIP
<< ARM_SMCCC_OWNER_SHIFT
),
133 .arginfo
= QCOM_SCM_ARGS(1),
134 .owner
= ARM_SMCCC_OWNER_SIP
,
136 struct qcom_scm_res res
;
139 spin_lock_irqsave(&query_lock
, flags
);
143 qcom_scm_convention
= SMC_CONVENTION_ARM_64
;
144 // Device isn't required as there is only one argument - no device
145 // needed to dma_map_single to secure world
146 ret
= scm_smc_call(NULL
, &desc
, &res
, true);
147 if (!ret
&& res
.result
[0] == 1)
150 qcom_scm_convention
= SMC_CONVENTION_ARM_32
;
151 ret
= scm_smc_call(NULL
, &desc
, &res
, true);
152 if (!ret
&& res
.result
[0] == 1)
155 qcom_scm_convention
= SMC_CONVENTION_LEGACY
;
158 spin_unlock_irqrestore(&query_lock
, flags
);
159 pr_info("qcom_scm: convention: %s\n",
160 qcom_scm_convention_names
[qcom_scm_convention
]);
163 static inline enum qcom_scm_convention
__get_convention(void)
165 if (unlikely(!has_queried
))
166 __query_convention();
167 return qcom_scm_convention
;
171 * qcom_scm_call() - Invoke a syscall in the secure world
173 * @svc_id: service identifier
174 * @cmd_id: command identifier
175 * @desc: Descriptor structure containing arguments and return values
177 * Sends a command to the SCM and waits for the command to finish processing.
178 * This should *only* be called in pre-emptible context.
180 static int qcom_scm_call(struct device
*dev
, const struct qcom_scm_desc
*desc
,
181 struct qcom_scm_res
*res
)
184 switch (__get_convention()) {
185 case SMC_CONVENTION_ARM_32
:
186 case SMC_CONVENTION_ARM_64
:
187 return scm_smc_call(dev
, desc
, res
, false);
188 case SMC_CONVENTION_LEGACY
:
189 return scm_legacy_call(dev
, desc
, res
);
191 pr_err("Unknown current SCM calling convention.\n");
197 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
199 * @svc_id: service identifier
200 * @cmd_id: command identifier
201 * @desc: Descriptor structure containing arguments and return values
202 * @res: Structure containing results from SMC/HVC call
204 * Sends a command to the SCM and waits for the command to finish processing.
205 * This can be called in atomic context.
207 static int qcom_scm_call_atomic(struct device
*dev
,
208 const struct qcom_scm_desc
*desc
,
209 struct qcom_scm_res
*res
)
211 switch (__get_convention()) {
212 case SMC_CONVENTION_ARM_32
:
213 case SMC_CONVENTION_ARM_64
:
214 return scm_smc_call(dev
, desc
, res
, true);
215 case SMC_CONVENTION_LEGACY
:
216 return scm_legacy_call_atomic(dev
, desc
, res
);
218 pr_err("Unknown current SCM calling convention.\n");
223 static int __qcom_scm_is_call_available(struct device
*dev
, u32 svc_id
,
227 struct qcom_scm_desc desc
= {
228 .svc
= QCOM_SCM_SVC_INFO
,
229 .cmd
= QCOM_SCM_INFO_IS_CALL_AVAIL
,
230 .owner
= ARM_SMCCC_OWNER_SIP
,
232 struct qcom_scm_res res
;
234 desc
.arginfo
= QCOM_SCM_ARGS(1);
235 switch (__get_convention()) {
236 case SMC_CONVENTION_ARM_32
:
237 case SMC_CONVENTION_ARM_64
:
238 desc
.args
[0] = SCM_SMC_FNID(svc_id
, cmd_id
) |
239 (ARM_SMCCC_OWNER_SIP
<< ARM_SMCCC_OWNER_SHIFT
);
241 case SMC_CONVENTION_LEGACY
:
242 desc
.args
[0] = SCM_LEGACY_FNID(svc_id
, cmd_id
);
245 pr_err("Unknown SMC convention being used\n");
249 ret
= qcom_scm_call(dev
, &desc
, &res
);
251 return ret
? : res
.result
[0];
255 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
256 * @entry: Entry point function for the cpus
257 * @cpus: The cpumask of cpus that will use the entry point
259 * Set the Linux entry point for the SCM to transfer control to when coming
260 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
262 int qcom_scm_set_warm_boot_addr(void *entry
, const cpumask_t
*cpus
)
267 struct qcom_scm_desc desc
= {
268 .svc
= QCOM_SCM_SVC_BOOT
,
269 .cmd
= QCOM_SCM_BOOT_SET_ADDR
,
270 .arginfo
= QCOM_SCM_ARGS(2),
274 * Reassign only if we are switching from hotplug entry point
275 * to cpuidle entry point or vice versa.
277 for_each_cpu(cpu
, cpus
) {
278 if (entry
== qcom_scm_wb
[cpu
].entry
)
280 flags
|= qcom_scm_wb
[cpu
].flag
;
283 /* No change in entry function */
287 desc
.args
[0] = flags
;
288 desc
.args
[1] = virt_to_phys(entry
);
290 ret
= qcom_scm_call(__scm
->dev
, &desc
, NULL
);
292 for_each_cpu(cpu
, cpus
)
293 qcom_scm_wb
[cpu
].entry
= entry
;
298 EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr
);
301 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
302 * @entry: Entry point function for the cpus
303 * @cpus: The cpumask of cpus that will use the entry point
305 * Set the cold boot address of the cpus. Any cpu outside the supported
306 * range would be removed from the cpu present mask.
308 int qcom_scm_set_cold_boot_addr(void *entry
, const cpumask_t
*cpus
)
312 int scm_cb_flags
[] = {
313 QCOM_SCM_FLAG_COLDBOOT_CPU0
,
314 QCOM_SCM_FLAG_COLDBOOT_CPU1
,
315 QCOM_SCM_FLAG_COLDBOOT_CPU2
,
316 QCOM_SCM_FLAG_COLDBOOT_CPU3
,
318 struct qcom_scm_desc desc
= {
319 .svc
= QCOM_SCM_SVC_BOOT
,
320 .cmd
= QCOM_SCM_BOOT_SET_ADDR
,
321 .arginfo
= QCOM_SCM_ARGS(2),
322 .owner
= ARM_SMCCC_OWNER_SIP
,
325 if (!cpus
|| (cpus
&& cpumask_empty(cpus
)))
328 for_each_cpu(cpu
, cpus
) {
329 if (cpu
< ARRAY_SIZE(scm_cb_flags
))
330 flags
|= scm_cb_flags
[cpu
];
332 set_cpu_present(cpu
, false);
335 desc
.args
[0] = flags
;
336 desc
.args
[1] = virt_to_phys(entry
);
338 return qcom_scm_call_atomic(__scm
? __scm
->dev
: NULL
, &desc
, NULL
);
340 EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr
);
343 * qcom_scm_cpu_power_down() - Power down the cpu
344 * @flags - Flags to flush cache
346 * This is an end point to power down cpu. If there was a pending interrupt,
347 * the control would return from this function, otherwise, the cpu jumps to the
348 * warm boot entry point set for this cpu upon reset.
350 void qcom_scm_cpu_power_down(u32 flags
)
352 struct qcom_scm_desc desc
= {
353 .svc
= QCOM_SCM_SVC_BOOT
,
354 .cmd
= QCOM_SCM_BOOT_TERMINATE_PC
,
355 .args
[0] = flags
& QCOM_SCM_FLUSH_FLAG_MASK
,
356 .arginfo
= QCOM_SCM_ARGS(1),
357 .owner
= ARM_SMCCC_OWNER_SIP
,
360 qcom_scm_call_atomic(__scm
? __scm
->dev
: NULL
, &desc
, NULL
);
362 EXPORT_SYMBOL(qcom_scm_cpu_power_down
);
364 int qcom_scm_set_remote_state(u32 state
, u32 id
)
366 struct qcom_scm_desc desc
= {
367 .svc
= QCOM_SCM_SVC_BOOT
,
368 .cmd
= QCOM_SCM_BOOT_SET_REMOTE_STATE
,
369 .arginfo
= QCOM_SCM_ARGS(2),
372 .owner
= ARM_SMCCC_OWNER_SIP
,
374 struct qcom_scm_res res
;
377 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
379 return ret
? : res
.result
[0];
381 EXPORT_SYMBOL(qcom_scm_set_remote_state
);
383 static int __qcom_scm_set_dload_mode(struct device
*dev
, bool enable
)
385 struct qcom_scm_desc desc
= {
386 .svc
= QCOM_SCM_SVC_BOOT
,
387 .cmd
= QCOM_SCM_BOOT_SET_DLOAD_MODE
,
388 .arginfo
= QCOM_SCM_ARGS(2),
389 .args
[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE
,
390 .owner
= ARM_SMCCC_OWNER_SIP
,
393 desc
.args
[1] = enable
? QCOM_SCM_BOOT_SET_DLOAD_MODE
: 0;
395 return qcom_scm_call(__scm
->dev
, &desc
, NULL
);
398 static void qcom_scm_set_download_mode(bool enable
)
403 avail
= __qcom_scm_is_call_available(__scm
->dev
,
405 QCOM_SCM_BOOT_SET_DLOAD_MODE
);
407 ret
= __qcom_scm_set_dload_mode(__scm
->dev
, enable
);
408 } else if (__scm
->dload_mode_addr
) {
409 ret
= qcom_scm_io_writel(__scm
->dload_mode_addr
,
410 enable
? QCOM_SCM_BOOT_SET_DLOAD_MODE
: 0);
413 "No available mechanism for setting download mode\n");
417 dev_err(__scm
->dev
, "failed to set download mode: %d\n", ret
);
421 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
422 * state machine for a given peripheral, using the
424 * @peripheral: peripheral id
425 * @metadata: pointer to memory containing ELF header, program header table
426 * and optional blob of data used for authenticating the metadata
427 * and the rest of the firmware
428 * @size: size of the metadata
430 * Returns 0 on success.
432 int qcom_scm_pas_init_image(u32 peripheral
, const void *metadata
, size_t size
)
434 dma_addr_t mdata_phys
;
437 struct qcom_scm_desc desc
= {
438 .svc
= QCOM_SCM_SVC_PIL
,
439 .cmd
= QCOM_SCM_PIL_PAS_INIT_IMAGE
,
440 .arginfo
= QCOM_SCM_ARGS(2, QCOM_SCM_VAL
, QCOM_SCM_RW
),
441 .args
[0] = peripheral
,
442 .owner
= ARM_SMCCC_OWNER_SIP
,
444 struct qcom_scm_res res
;
447 * During the scm call memory protection will be enabled for the meta
448 * data blob, so make sure it's physically contiguous, 4K aligned and
449 * non-cachable to avoid XPU violations.
451 mdata_buf
= dma_alloc_coherent(__scm
->dev
, size
, &mdata_phys
,
454 dev_err(__scm
->dev
, "Allocation of metadata buffer failed.\n");
457 memcpy(mdata_buf
, metadata
, size
);
459 ret
= qcom_scm_clk_enable();
463 desc
.args
[1] = mdata_phys
;
465 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
467 qcom_scm_clk_disable();
470 dma_free_coherent(__scm
->dev
, size
, mdata_buf
, mdata_phys
);
472 return ret
? : res
.result
[0];
474 EXPORT_SYMBOL(qcom_scm_pas_init_image
);
477 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
478 * for firmware loading
479 * @peripheral: peripheral id
480 * @addr: start address of memory area to prepare
481 * @size: size of the memory area to prepare
483 * Returns 0 on success.
485 int qcom_scm_pas_mem_setup(u32 peripheral
, phys_addr_t addr
, phys_addr_t size
)
488 struct qcom_scm_desc desc
= {
489 .svc
= QCOM_SCM_SVC_PIL
,
490 .cmd
= QCOM_SCM_PIL_PAS_MEM_SETUP
,
491 .arginfo
= QCOM_SCM_ARGS(3),
492 .args
[0] = peripheral
,
495 .owner
= ARM_SMCCC_OWNER_SIP
,
497 struct qcom_scm_res res
;
499 ret
= qcom_scm_clk_enable();
503 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
504 qcom_scm_clk_disable();
506 return ret
? : res
.result
[0];
508 EXPORT_SYMBOL(qcom_scm_pas_mem_setup
);
511 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
512 * and reset the remote processor
513 * @peripheral: peripheral id
515 * Return 0 on success.
517 int qcom_scm_pas_auth_and_reset(u32 peripheral
)
520 struct qcom_scm_desc desc
= {
521 .svc
= QCOM_SCM_SVC_PIL
,
522 .cmd
= QCOM_SCM_PIL_PAS_AUTH_AND_RESET
,
523 .arginfo
= QCOM_SCM_ARGS(1),
524 .args
[0] = peripheral
,
525 .owner
= ARM_SMCCC_OWNER_SIP
,
527 struct qcom_scm_res res
;
529 ret
= qcom_scm_clk_enable();
533 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
534 qcom_scm_clk_disable();
536 return ret
? : res
.result
[0];
538 EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset
);
541 * qcom_scm_pas_shutdown() - Shut down the remote processor
542 * @peripheral: peripheral id
544 * Returns 0 on success.
546 int qcom_scm_pas_shutdown(u32 peripheral
)
549 struct qcom_scm_desc desc
= {
550 .svc
= QCOM_SCM_SVC_PIL
,
551 .cmd
= QCOM_SCM_PIL_PAS_SHUTDOWN
,
552 .arginfo
= QCOM_SCM_ARGS(1),
553 .args
[0] = peripheral
,
554 .owner
= ARM_SMCCC_OWNER_SIP
,
556 struct qcom_scm_res res
;
558 ret
= qcom_scm_clk_enable();
562 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
564 qcom_scm_clk_disable();
566 return ret
? : res
.result
[0];
568 EXPORT_SYMBOL(qcom_scm_pas_shutdown
);
571 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
572 * available for the given peripherial
573 * @peripheral: peripheral id
575 * Returns true if PAS is supported for this peripheral, otherwise false.
577 bool qcom_scm_pas_supported(u32 peripheral
)
580 struct qcom_scm_desc desc
= {
581 .svc
= QCOM_SCM_SVC_PIL
,
582 .cmd
= QCOM_SCM_PIL_PAS_IS_SUPPORTED
,
583 .arginfo
= QCOM_SCM_ARGS(1),
584 .args
[0] = peripheral
,
585 .owner
= ARM_SMCCC_OWNER_SIP
,
587 struct qcom_scm_res res
;
589 ret
= __qcom_scm_is_call_available(__scm
->dev
, QCOM_SCM_SVC_PIL
,
590 QCOM_SCM_PIL_PAS_IS_SUPPORTED
);
594 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
596 return ret
? false : !!res
.result
[0];
598 EXPORT_SYMBOL(qcom_scm_pas_supported
);
600 static int __qcom_scm_pas_mss_reset(struct device
*dev
, bool reset
)
602 struct qcom_scm_desc desc
= {
603 .svc
= QCOM_SCM_SVC_PIL
,
604 .cmd
= QCOM_SCM_PIL_PAS_MSS_RESET
,
605 .arginfo
= QCOM_SCM_ARGS(2),
608 .owner
= ARM_SMCCC_OWNER_SIP
,
610 struct qcom_scm_res res
;
613 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
615 return ret
? : res
.result
[0];
618 static int qcom_scm_pas_reset_assert(struct reset_controller_dev
*rcdev
,
624 return __qcom_scm_pas_mss_reset(__scm
->dev
, 1);
627 static int qcom_scm_pas_reset_deassert(struct reset_controller_dev
*rcdev
,
633 return __qcom_scm_pas_mss_reset(__scm
->dev
, 0);
636 static const struct reset_control_ops qcom_scm_pas_reset_ops
= {
637 .assert = qcom_scm_pas_reset_assert
,
638 .deassert
= qcom_scm_pas_reset_deassert
,
641 int qcom_scm_io_readl(phys_addr_t addr
, unsigned int *val
)
643 struct qcom_scm_desc desc
= {
644 .svc
= QCOM_SCM_SVC_IO
,
645 .cmd
= QCOM_SCM_IO_READ
,
646 .arginfo
= QCOM_SCM_ARGS(1),
648 .owner
= ARM_SMCCC_OWNER_SIP
,
650 struct qcom_scm_res res
;
654 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
656 *val
= res
.result
[0];
658 return ret
< 0 ? ret
: 0;
660 EXPORT_SYMBOL(qcom_scm_io_readl
);
662 int qcom_scm_io_writel(phys_addr_t addr
, unsigned int val
)
664 struct qcom_scm_desc desc
= {
665 .svc
= QCOM_SCM_SVC_IO
,
666 .cmd
= QCOM_SCM_IO_WRITE
,
667 .arginfo
= QCOM_SCM_ARGS(2),
670 .owner
= ARM_SMCCC_OWNER_SIP
,
674 return qcom_scm_call(__scm
->dev
, &desc
, NULL
);
676 EXPORT_SYMBOL(qcom_scm_io_writel
);
679 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
680 * supports restore security config interface.
682 * Return true if restore-cfg interface is supported, false if not.
684 bool qcom_scm_restore_sec_cfg_available(void)
686 return __qcom_scm_is_call_available(__scm
->dev
, QCOM_SCM_SVC_MP
,
687 QCOM_SCM_MP_RESTORE_SEC_CFG
);
689 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available
);
691 int qcom_scm_restore_sec_cfg(u32 device_id
, u32 spare
)
693 struct qcom_scm_desc desc
= {
694 .svc
= QCOM_SCM_SVC_MP
,
695 .cmd
= QCOM_SCM_MP_RESTORE_SEC_CFG
,
696 .arginfo
= QCOM_SCM_ARGS(2),
697 .args
[0] = device_id
,
699 .owner
= ARM_SMCCC_OWNER_SIP
,
701 struct qcom_scm_res res
;
704 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
706 return ret
? : res
.result
[0];
708 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg
);
710 int qcom_scm_iommu_secure_ptbl_size(u32 spare
, size_t *size
)
712 struct qcom_scm_desc desc
= {
713 .svc
= QCOM_SCM_SVC_MP
,
714 .cmd
= QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE
,
715 .arginfo
= QCOM_SCM_ARGS(1),
717 .owner
= ARM_SMCCC_OWNER_SIP
,
719 struct qcom_scm_res res
;
722 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
725 *size
= res
.result
[0];
727 return ret
? : res
.result
[1];
729 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size
);
731 int qcom_scm_iommu_secure_ptbl_init(u64 addr
, u32 size
, u32 spare
)
733 struct qcom_scm_desc desc
= {
734 .svc
= QCOM_SCM_SVC_MP
,
735 .cmd
= QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT
,
736 .arginfo
= QCOM_SCM_ARGS(3, QCOM_SCM_RW
, QCOM_SCM_VAL
,
741 .owner
= ARM_SMCCC_OWNER_SIP
,
747 desc
.args
[2] = spare
;
748 desc
.arginfo
= QCOM_SCM_ARGS(3, QCOM_SCM_RW
, QCOM_SCM_VAL
,
751 ret
= qcom_scm_call(__scm
->dev
, &desc
, NULL
);
753 /* the pg table has been initialized already, ignore the error */
759 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init
);
761 static int __qcom_scm_assign_mem(struct device
*dev
, phys_addr_t mem_region
,
762 size_t mem_sz
, phys_addr_t src
, size_t src_sz
,
763 phys_addr_t dest
, size_t dest_sz
)
766 struct qcom_scm_desc desc
= {
767 .svc
= QCOM_SCM_SVC_MP
,
768 .cmd
= QCOM_SCM_MP_ASSIGN
,
769 .arginfo
= QCOM_SCM_ARGS(7, QCOM_SCM_RO
, QCOM_SCM_VAL
,
770 QCOM_SCM_RO
, QCOM_SCM_VAL
, QCOM_SCM_RO
,
771 QCOM_SCM_VAL
, QCOM_SCM_VAL
),
772 .args
[0] = mem_region
,
779 .owner
= ARM_SMCCC_OWNER_SIP
,
781 struct qcom_scm_res res
;
783 ret
= qcom_scm_call(dev
, &desc
, &res
);
785 return ret
? : res
.result
[0];
789 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
790 * @mem_addr: mem region whose ownership need to be reassigned
791 * @mem_sz: size of the region.
792 * @srcvm: vmid for current set of owners, each set bit in
793 * flag indicate a unique owner
794 * @newvm: array having new owners and corresponding permission
796 * @dest_cnt: number of owners in next set.
798 * Return negative errno on failure or 0 on success with @srcvm updated.
800 int qcom_scm_assign_mem(phys_addr_t mem_addr
, size_t mem_sz
,
802 const struct qcom_scm_vmperm
*newvm
,
803 unsigned int dest_cnt
)
805 struct qcom_scm_current_perm_info
*destvm
;
806 struct qcom_scm_mem_map_info
*mem_to_map
;
807 phys_addr_t mem_to_map_phys
;
808 phys_addr_t dest_phys
;
809 phys_addr_t ptr_phys
;
811 size_t mem_to_map_sz
;
819 unsigned long srcvm_bits
= *srcvm
;
821 src_sz
= hweight_long(srcvm_bits
) * sizeof(*src
);
822 mem_to_map_sz
= sizeof(*mem_to_map
);
823 dest_sz
= dest_cnt
* sizeof(*destvm
);
824 ptr_sz
= ALIGN(src_sz
, SZ_64
) + ALIGN(mem_to_map_sz
, SZ_64
) +
825 ALIGN(dest_sz
, SZ_64
);
827 ptr
= dma_alloc_coherent(__scm
->dev
, ptr_sz
, &ptr_dma
, GFP_KERNEL
);
830 ptr_phys
= dma_to_phys(__scm
->dev
, ptr_dma
);
832 /* Fill source vmid detail */
835 for_each_set_bit(b
, &srcvm_bits
, BITS_PER_LONG
)
836 src
[i
++] = cpu_to_le32(b
);
838 /* Fill details of mem buff to map */
839 mem_to_map
= ptr
+ ALIGN(src_sz
, SZ_64
);
840 mem_to_map_phys
= ptr_phys
+ ALIGN(src_sz
, SZ_64
);
841 mem_to_map
->mem_addr
= cpu_to_le64(mem_addr
);
842 mem_to_map
->mem_size
= cpu_to_le64(mem_sz
);
845 /* Fill details of next vmid detail */
846 destvm
= ptr
+ ALIGN(mem_to_map_sz
, SZ_64
) + ALIGN(src_sz
, SZ_64
);
847 dest_phys
= ptr_phys
+ ALIGN(mem_to_map_sz
, SZ_64
) + ALIGN(src_sz
, SZ_64
);
848 for (i
= 0; i
< dest_cnt
; i
++, destvm
++, newvm
++) {
849 destvm
->vmid
= cpu_to_le32(newvm
->vmid
);
850 destvm
->perm
= cpu_to_le32(newvm
->perm
);
852 destvm
->ctx_size
= 0;
853 next_vm
|= BIT(newvm
->vmid
);
856 ret
= __qcom_scm_assign_mem(__scm
->dev
, mem_to_map_phys
, mem_to_map_sz
,
857 ptr_phys
, src_sz
, dest_phys
, dest_sz
);
858 dma_free_coherent(__scm
->dev
, ptr_sz
, ptr
, ptr_dma
);
861 "Assign memory protection call failed %d\n", ret
);
868 EXPORT_SYMBOL(qcom_scm_assign_mem
);
871 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
873 bool qcom_scm_ocmem_lock_available(void)
875 return __qcom_scm_is_call_available(__scm
->dev
, QCOM_SCM_SVC_OCMEM
,
876 QCOM_SCM_OCMEM_LOCK_CMD
);
878 EXPORT_SYMBOL(qcom_scm_ocmem_lock_available
);
881 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
882 * region to the specified initiator
884 * @id: tz initiator id
885 * @offset: OCMEM offset
887 * @mode: access mode (WIDE/NARROW)
889 int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id
, u32 offset
, u32 size
,
892 struct qcom_scm_desc desc
= {
893 .svc
= QCOM_SCM_SVC_OCMEM
,
894 .cmd
= QCOM_SCM_OCMEM_LOCK_CMD
,
899 .arginfo
= QCOM_SCM_ARGS(4),
902 return qcom_scm_call(__scm
->dev
, &desc
, NULL
);
904 EXPORT_SYMBOL(qcom_scm_ocmem_lock
);
907 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
908 * region from the specified initiator
910 * @id: tz initiator id
911 * @offset: OCMEM offset
914 int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id
, u32 offset
, u32 size
)
916 struct qcom_scm_desc desc
= {
917 .svc
= QCOM_SCM_SVC_OCMEM
,
918 .cmd
= QCOM_SCM_OCMEM_UNLOCK_CMD
,
922 .arginfo
= QCOM_SCM_ARGS(3),
925 return qcom_scm_call(__scm
->dev
, &desc
, NULL
);
927 EXPORT_SYMBOL(qcom_scm_ocmem_unlock
);
930 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
932 * Return true if HDCP is supported, false if not.
934 bool qcom_scm_hdcp_available(void)
936 int ret
= qcom_scm_clk_enable();
941 ret
= __qcom_scm_is_call_available(__scm
->dev
, QCOM_SCM_SVC_HDCP
,
942 QCOM_SCM_HDCP_INVOKE
);
944 qcom_scm_clk_disable();
946 return ret
> 0 ? true : false;
948 EXPORT_SYMBOL(qcom_scm_hdcp_available
);
951 * qcom_scm_hdcp_req() - Send HDCP request.
952 * @req: HDCP request array
953 * @req_cnt: HDCP request array count
954 * @resp: response buffer passed to SCM
956 * Write HDCP register(s) through SCM.
958 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req
*req
, u32 req_cnt
, u32
*resp
)
961 struct qcom_scm_desc desc
= {
962 .svc
= QCOM_SCM_SVC_HDCP
,
963 .cmd
= QCOM_SCM_HDCP_INVOKE
,
964 .arginfo
= QCOM_SCM_ARGS(10),
977 .owner
= ARM_SMCCC_OWNER_SIP
,
979 struct qcom_scm_res res
;
981 if (req_cnt
> QCOM_SCM_HDCP_MAX_REQ_CNT
)
984 ret
= qcom_scm_clk_enable();
988 ret
= qcom_scm_call(__scm
->dev
, &desc
, &res
);
989 *resp
= res
.result
[0];
991 qcom_scm_clk_disable();
995 EXPORT_SYMBOL(qcom_scm_hdcp_req
);
997 int qcom_scm_qsmmu500_wait_safe_toggle(bool en
)
999 struct qcom_scm_desc desc
= {
1000 .svc
= QCOM_SCM_SVC_SMMU_PROGRAM
,
1001 .cmd
= QCOM_SCM_SMMU_CONFIG_ERRATA1
,
1002 .arginfo
= QCOM_SCM_ARGS(2),
1003 .args
[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL
,
1005 .owner
= ARM_SMCCC_OWNER_SIP
,
1009 return qcom_scm_call_atomic(__scm
->dev
, &desc
, NULL
);
1011 EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle
);
1013 static int qcom_scm_find_dload_address(struct device
*dev
, u64
*addr
)
1015 struct device_node
*tcsr
;
1016 struct device_node
*np
= dev
->of_node
;
1017 struct resource res
;
1021 tcsr
= of_parse_phandle(np
, "qcom,dload-mode", 0);
1025 ret
= of_address_to_resource(tcsr
, 0, &res
);
1030 ret
= of_property_read_u32_index(np
, "qcom,dload-mode", 1, &offset
);
1034 *addr
= res
.start
+ offset
;
1040 * qcom_scm_is_available() - Checks if SCM is available
1042 bool qcom_scm_is_available(void)
1046 EXPORT_SYMBOL(qcom_scm_is_available
);
1048 static int qcom_scm_probe(struct platform_device
*pdev
)
1050 struct qcom_scm
*scm
;
1054 scm
= devm_kzalloc(&pdev
->dev
, sizeof(*scm
), GFP_KERNEL
);
1058 ret
= qcom_scm_find_dload_address(&pdev
->dev
, &scm
->dload_mode_addr
);
1062 clks
= (unsigned long)of_device_get_match_data(&pdev
->dev
);
1064 scm
->core_clk
= devm_clk_get(&pdev
->dev
, "core");
1065 if (IS_ERR(scm
->core_clk
)) {
1066 if (PTR_ERR(scm
->core_clk
) == -EPROBE_DEFER
)
1067 return PTR_ERR(scm
->core_clk
);
1069 if (clks
& SCM_HAS_CORE_CLK
) {
1070 dev_err(&pdev
->dev
, "failed to acquire core clk\n");
1071 return PTR_ERR(scm
->core_clk
);
1074 scm
->core_clk
= NULL
;
1077 scm
->iface_clk
= devm_clk_get(&pdev
->dev
, "iface");
1078 if (IS_ERR(scm
->iface_clk
)) {
1079 if (PTR_ERR(scm
->iface_clk
) == -EPROBE_DEFER
)
1080 return PTR_ERR(scm
->iface_clk
);
1082 if (clks
& SCM_HAS_IFACE_CLK
) {
1083 dev_err(&pdev
->dev
, "failed to acquire iface clk\n");
1084 return PTR_ERR(scm
->iface_clk
);
1087 scm
->iface_clk
= NULL
;
1090 scm
->bus_clk
= devm_clk_get(&pdev
->dev
, "bus");
1091 if (IS_ERR(scm
->bus_clk
)) {
1092 if (PTR_ERR(scm
->bus_clk
) == -EPROBE_DEFER
)
1093 return PTR_ERR(scm
->bus_clk
);
1095 if (clks
& SCM_HAS_BUS_CLK
) {
1096 dev_err(&pdev
->dev
, "failed to acquire bus clk\n");
1097 return PTR_ERR(scm
->bus_clk
);
1100 scm
->bus_clk
= NULL
;
1103 scm
->reset
.ops
= &qcom_scm_pas_reset_ops
;
1104 scm
->reset
.nr_resets
= 1;
1105 scm
->reset
.of_node
= pdev
->dev
.of_node
;
1106 ret
= devm_reset_controller_register(&pdev
->dev
, &scm
->reset
);
1110 /* vote for max clk rate for highest performance */
1111 ret
= clk_set_rate(scm
->core_clk
, INT_MAX
);
1116 __scm
->dev
= &pdev
->dev
;
1118 __query_convention();
1121 * If requested enable "download mode", from this point on warmboot
1122 * will cause the the boot stages to enter download mode, unless
1123 * disabled below by a clean shutdown/reboot.
1126 qcom_scm_set_download_mode(true);
1131 static void qcom_scm_shutdown(struct platform_device
*pdev
)
1133 /* Clean shutdown, disable download mode to allow normal restart */
1135 qcom_scm_set_download_mode(false);
1138 static const struct of_device_id qcom_scm_dt_match
[] = {
1139 { .compatible
= "qcom,scm-apq8064",
1140 /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
1142 { .compatible
= "qcom,scm-apq8084", .data
= (void *)(SCM_HAS_CORE_CLK
|
1146 { .compatible
= "qcom,scm-ipq4019" },
1147 { .compatible
= "qcom,scm-msm8660", .data
= (void *) SCM_HAS_CORE_CLK
},
1148 { .compatible
= "qcom,scm-msm8960", .data
= (void *) SCM_HAS_CORE_CLK
},
1149 { .compatible
= "qcom,scm-msm8916", .data
= (void *)(SCM_HAS_CORE_CLK
|
1153 { .compatible
= "qcom,scm-msm8974", .data
= (void *)(SCM_HAS_CORE_CLK
|
1157 { .compatible
= "qcom,scm-msm8996" },
1158 { .compatible
= "qcom,scm" },
1162 static struct platform_driver qcom_scm_driver
= {
1165 .of_match_table
= qcom_scm_dt_match
,
1167 .probe
= qcom_scm_probe
,
1168 .shutdown
= qcom_scm_shutdown
,
1171 static int __init
qcom_scm_init(void)
1173 return platform_driver_register(&qcom_scm_driver
);
1175 subsys_initcall(qcom_scm_init
);