// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include "qcom_scm.h"
#include "qcom_tzmem.h"
static u32 download_mode;

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct icc_path *path;
	struct completion waitq_comp;
	struct reset_controller_dev reset;

	/* control access to the interconnect path */
	struct mutex scm_bw_lock;
	int scm_vote_count;

	u64 dload_mode_addr;

	struct qcom_tzmem_pool *mempool;
};
struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

/**
 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
 * @result:    Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
 * @data:      Response data. The type of this data is given in @resp_type.
 */
struct qcom_scm_qseecom_resp {
	u64 result;
	u64 resp_type;
	u64 data;
};

enum qcom_scm_qseecom_result {
	QSEECOM_RESULT_SUCCESS			= 0,
	QSEECOM_RESULT_INCOMPLETE		= 1,
	QSEECOM_RESULT_BLOCKED_ON_LISTENER	= 2,
	QSEECOM_RESULT_FAILURE			= 0xFFFFFFFF,
};

enum qcom_scm_qseecom_resp_type {
	QSEECOM_SCM_RES_APP_ID			= 0xEE01,
	QSEECOM_SCM_RES_QSEOS_LISTENER_ID	= 0xEE02,
};

enum qcom_scm_qseecom_tz_owner {
	QSEECOM_TZ_OWNER_SIP			= 2,
	QSEECOM_TZ_OWNER_TZ_APPS		= 48,
	QSEECOM_TZ_OWNER_QSEE_OS		= 50
};

enum qcom_scm_qseecom_tz_svc {
	QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER	= 0,
	QSEECOM_TZ_SVC_APP_MGR			= 1,
	QSEECOM_TZ_SVC_INFO			= 6,
};

enum qcom_scm_qseecom_tz_cmd_app {
	QSEECOM_TZ_CMD_APP_SEND			= 1,
	QSEECOM_TZ_CMD_APP_LOOKUP		= 3,
};

enum qcom_scm_qseecom_tz_cmd_info {
	QSEECOM_TZ_CMD_INFO_VERSION		= 3,
};
#define QSEECOM_MAX_APP_NAME_SIZE		64
#define SHMBRIDGE_RESULT_NOTSUPP		4

/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE	BIT(0)

#define QCOM_DLOAD_MASK		GENMASK(5, 4)
#define QCOM_DLOAD_NODUMP	0
#define QCOM_DLOAD_FULLDUMP	1
#define QCOM_DLOAD_MINIDUMP	2
#define QCOM_DLOAD_BOTHDUMP	3
static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static const char * const download_mode_name[] = {
	[QCOM_DLOAD_NODUMP]	= "off",
	[QCOM_DLOAD_FULLDUMP]	= "full",
	[QCOM_DLOAD_MINIDUMP]	= "mini",
	[QCOM_DLOAD_BOTHDUMP]	= "full,mini",
};

static struct qcom_scm *__scm;
static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}
static int qcom_scm_bw_enable(void)
{
	int ret = 0;

	if (!__scm->path)
		return 0;

	mutex_lock(&__scm->scm_bw_lock);
	if (!__scm->scm_vote_count) {
		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
		if (ret < 0) {
			dev_err(__scm->dev, "failed to set bandwidth request\n");
			goto err_bw;
		}
	}
	__scm->scm_vote_count++;
err_bw:
	mutex_unlock(&__scm->scm_bw_lock);

	return ret;
}

static void qcom_scm_bw_disable(void)
{
	if (!__scm->path)
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}
enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
	return __scm ? __scm->mempool : NULL;
}
static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention can only be used when the client is 64-bit, otherwise
	 * system will encounter the undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64
	 * calling conventions on these firmwares. Luckily we don't make any
	 * early calls into the firmware on these SoCs so the device pointer
	 * will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}
/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}
/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}
static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
			/* Apply to all CPUs in all affinity levels */
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);
/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);
/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);
int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);
static int qcom_scm_disable_sdi(void)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SDI_CONFIG,
		.args[0] = 1, /* Disable watchdog debug */
		.args[1] = 0, /* Disable SDI */
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;
	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val)
{
	unsigned int old;
	unsigned int new;
	int ret;

	ret = qcom_scm_io_readl(addr, &old);
	if (ret)
		return ret;

	new = (old & ~mask) | (val & mask);

	return qcom_scm_io_writel(addr, new);
}
static void qcom_scm_set_download_mode(u32 dload_mode)
{
	int ret = 0;

	if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
				      FIELD_PREP(QCOM_DLOAD_MASK, dload_mode));
	} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
						QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
	} else if (dload_mode) {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}
/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 * @ctx:	optional metadata context
 *
 * Return: 0 on success.
 *
 * Upon successful return, the PAS metadata context (@ctx) will be used to
 * track the metadata allocation, this needs to be released by invoking
 * qcom_scm_pas_metadata_release() by the caller.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
			    struct qcom_scm_pas_metadata *ctx)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the scm call memory protection will be enabled for the meta
	 * data blob, so make sure it's physically contiguous, 4K aligned and
	 * non-cachable to avoid XPU violations.
	 *
	 * For PIL calls the hypervisor creates SHM Bridges for the blob
	 * buffers on behalf of Linux so we must not do it ourselves hence
	 * not using the TZMem allocator here.
	 *
	 * If we pass a buffer that is already part of an SHM Bridge to this
	 * call, it will fail.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf)
		return -ENOMEM;

	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto out;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

out:
	if (ret < 0 || !ctx) {
		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else if (ctx) {
		ctx->ptr = mdata_buf;
		ctx->phys = mdata_phys;
		ctx->size = size;
	}

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);
/**
 * qcom_scm_pas_metadata_release() - release metadata context
 * @ctx:	metadata context
 */
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
{
	if (!ctx->ptr)
		return;

	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);

	ctx->ptr = NULL;
	ctx->phys = 0;
	ctx->size = 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);
/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral:	peripheral id
 * @addr:	start address of memory area to prepare
 * @size:	size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);
/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral: peripheral id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);
/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);
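
/*
 * Example (illustrative only): how a remoteproc driver might drive the PAS
 * sequence above. "ADSP_PAS_ID", "fw", "mem_phys" and "mem_size" are
 * placeholders supplied by the caller, not symbols defined in this file.
 *
 *	struct qcom_scm_pas_metadata ctx = {};
 *	int ret;
 *
 *	ret = qcom_scm_pas_init_image(ADSP_PAS_ID, fw->data, fw->size, &ctx);
 *	if (!ret)
 *		ret = qcom_scm_pas_mem_setup(ADSP_PAS_ID, mem_phys, mem_size);
 *	if (!ret)
 *		ret = qcom_scm_pas_auth_and_reset(ADSP_PAS_ID);
 *	qcom_scm_pas_metadata_release(&ctx);
 *
 * On remoteproc stop the driver would then call
 * qcom_scm_pas_shutdown(ADSP_PAS_ID).
 */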
/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral:	peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);
static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);
int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);
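
/*
 * Example (illustrative only): a read-modify-write of a secure register via
 * the accessors above, equivalent to what qcom_scm_io_rmw() does internally.
 * "TCSR_BOOT_MISC_ADDR" is a placeholder physical address, not a constant
 * defined in this file.
 *
 *	unsigned int val;
 *
 *	if (!qcom_scm_io_readl(TCSR_BOOT_MISC_ADDR, &val)) {
 *		val &= ~QCOM_DLOAD_MASK;
 *		val |= FIELD_PREP(QCOM_DLOAD_MASK, QCOM_DLOAD_FULLDUMP);
 *		qcom_scm_io_writel(TCSR_BOOT_MISC_ADDR, val);
 *	}
 */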
/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);
int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);
#define QCOM_SCM_CP_APERTURE_CONTEXT_MASK	GENMASK(7, 0)

bool qcom_scm_set_gpu_smmu_aperture_is_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_CP_SMMU_APERTURE_ID);
}
EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture_is_available);
int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_CP_SMMU_APERTURE_ID,
		.arginfo = QCOM_SCM_ARGS(4),
		.args[0] = 0xffff0000 | FIELD_PREP(QCOM_SCM_CP_APERTURE_CONTEXT_MASK, context_bank),
		.args[1] = 0xffffffff,
		.args[2] = 0xffffffff,
		.args[3] = 0xffffffff,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture);
int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);
int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);
int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = size,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);
int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);
static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}
/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership need to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid for current set of owners, each set bit in
 *            flag indicate a unique owner
 * @newvm:    array having new owners and corresponding permission
 *            flags
 * @dest_cnt: number of owners in next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			u64 *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	phys_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm = 0;
	__le32 *src;
	int ret, i, b;
	u64 srcvm_bits = *srcvm;

	src_sz = hweight64(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							ptr_sz, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ptr_phys = qcom_tzmem_to_phys(ptr);

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
		if (srcvm_bits & BIT(b))
			src[i++] = cpu_to_le32(b);
	}

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
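
/*
 * Example (illustrative only): handing a buffer from HLOS to the modem VM
 * with qcom_scm_assign_mem(). The VMID and permission macros come from
 * <linux/firmware/qcom/qcom_scm.h>; "buf_phys" and "buf_size" are
 * placeholders for the caller's region.
 *
 *	u64 perms = BIT(QCOM_SCM_VMID_HLOS);
 *	struct qcom_scm_vmperm newvm[] = {
 *		{ QCOM_SCM_VMID_MSS_MSA, QCOM_SCM_PERM_RW },
 *	};
 *
 *	ret = qcom_scm_assign_mem(buf_phys, buf_size, &perms, newvm,
 *				  ARRAY_SIZE(newvm));
 *
 * On success @perms is updated to the new owner set, which the caller must
 * keep and pass back when returning the memory to HLOS.
 */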
/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);
/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);
/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);
/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_available);
/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);
/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext.  Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							   key_size,
							   GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;

	memcpy(keybuf, key, key_size);
	desc.args[1] = qcom_tzmem_to_phys(keybuf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
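
/*
 * Example (illustrative only): programming a 64-byte AES-256-XTS key for a
 * 4096-byte data unit (8 * 512 bytes) and later invalidating the slot.
 * "slot" and "raw_key" are placeholders supplied by the storage driver.
 *
 *	err = qcom_scm_ice_set_key(slot, raw_key, 64,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *	...
 *	err = qcom_scm_ice_invalidate_key(slot);
 */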
/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return ret;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
					     QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);
/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);
int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = sec_id,
		.args[1] = ctx_num,
		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);
int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);
bool qcom_scm_lmh_dcvsh_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);
int qcom_scm_shm_bridge_enable(void)
{
	int ret;

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					  QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
		return -EOPNOTSUPP;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (ret)
		return ret;

	if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP)
		return -EOPNOTSUPP;

	return res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);
int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags,
			       u64 ipfn_and_s_perm_flags, u64 size_and_flags,
			       u64 ns_vmids, u64 *handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = pfn_and_ns_perm_flags,
		.args[1] = ipfn_and_s_perm_flags,
		.args[2] = size_and_flags,
		.args[3] = ns_vmids,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
	};

	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (handle && !ret)
		*handle = res.result[1];

	return ret ?: res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);
int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = handle,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete);
int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);
int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       payload_size,
							       GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 0;
	payload_buf[4] = payload_val;

	desc.args[0] = qcom_tzmem_to_phys(payload_buf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);
int qcom_scm_gpu_init_regs(u32 gpu_req)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_GPU,
		.cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = gpu_req,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}
#ifdef CONFIG_QCOM_QSEECOM

/* Lock for QSEECOM SCM call executions */
static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);

static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				   struct qcom_scm_qseecom_resp *res)
{
	struct qcom_scm_res scm_res = {};
	int status;

	/*
	 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
	 * require the respective call lock to be held.
	 */
	lockdep_assert_held(&qcom_scm_qseecom_call_lock);

	status = qcom_scm_call(__scm->dev, desc, &scm_res);

	res->result = scm_res.result[0];
	res->resp_type = scm_res.result[1];
	res->data = scm_res.result[2];

	return status;
}
/**
 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
 * @desc: SCM call descriptor.
 * @res:  SCM call response (output).
 *
 * Performs the QSEECOM SCM call described by @desc, returning the response in
 * @res.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				 struct qcom_scm_qseecom_resp *res)
{
	int status;

	/*
	 * Note: Multiple QSEECOM SCM calls should not be executed same time,
	 * so lock things here. This needs to be extended to callback/listener
	 * handling when support for that is implemented.
	 */

	mutex_lock(&qcom_scm_qseecom_call_lock);
	status = __qcom_scm_qseecom_call(desc, res);
	mutex_unlock(&qcom_scm_qseecom_call_lock);

	dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
		__func__, desc->owner, desc->svc, desc->cmd, res->result,
		res->resp_type, res->data);

	if (status) {
		dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
		return status;
	}

	/*
	 * TODO: Handle incomplete and blocked calls:
	 *
	 * Incomplete and blocked calls are not supported yet. Some devices
	 * and/or commands require those, some don't. Let's warn about them
	 * prominently in case someone attempts to try these commands with a
	 * device/command combination that isn't supported yet.
	 */
	WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
	WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);

	return 0;
}
/**
 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
 * @version: Pointer where the QSEECOM version will be stored.
 *
 * Performs the QSEECOM SCM querying the QSEECOM version currently running in
 * the TrustZone.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_get_version(u32 *version)
{
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	u32 feature = 10;
	int ret;

	desc.owner = QSEECOM_TZ_OWNER_SIP;
	desc.svc = QSEECOM_TZ_SVC_INFO;
	desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
	desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
	desc.args[0] = feature;

	ret = qcom_scm_qseecom_call(&desc, &res);
	if (ret)
		return ret;

	*version = res.result;
	return 0;
}
/**
 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
 * @app_name: The name of the app.
 * @app_id:   The returned app ID.
 *
 * Query and return the application ID of the QSEE app identified by the given
 * name. This returned ID is the unique identifier of the app required for
 * subsequent communication.
 *
 * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
 * loaded or could not be found.
 */
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
{
	unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
	unsigned long app_name_len = strlen(app_name);
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	int status;

	if (app_name_len >= name_buf_size)
		return -EINVAL;

	char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							     name_buf_size,
							     GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	memcpy(name_buf, app_name, app_name_len);

	desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
	desc.svc = QSEECOM_TZ_SVC_APP_MGR;
	desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = qcom_tzmem_to_phys(name_buf);
	desc.args[1] = app_name_len;

	status = qcom_scm_qseecom_call(&desc, &res);
	if (status)
		return status;

	if (res.result == QSEECOM_RESULT_FAILURE)
		return -ENOENT;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EINVAL;

	if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
		return -EINVAL;

	*app_id = res.data;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
/**
 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
 * @app_id:   The ID of the target app.
 * @req:      Request buffer sent to the app (must be TZ memory).
 * @req_size: Size of the request buffer.
 * @rsp:      Response buffer, written to by the app (must be TZ memory).
 * @rsp_size: Size of the response buffer.
 *
 * Sends a request to the QSEE app associated with the given ID and read back
 * its response. The caller must provide two DMA memory regions, one for the
 * request and one for the response, and fill out the @req region with the
 * respective (app-specific) request data. The QSEE app reads this and returns
 * its response in the @rsp region.
 *
 * Return: Zero on success, nonzero on failure.
 */
int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
			      void *rsp, size_t rsp_size)
{
	struct qcom_scm_qseecom_resp res = {};
	struct qcom_scm_desc desc = {};
	phys_addr_t req_phys;
	phys_addr_t rsp_phys;
	int status;

	req_phys = qcom_tzmem_to_phys(req);
	rsp_phys = qcom_tzmem_to_phys(rsp);

	desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
	desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
	desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
	desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = app_id;
	desc.args[1] = req_phys;
	desc.args[2] = req_size;
	desc.args[3] = rsp_phys;
	desc.args[4] = rsp_size;

	status = qcom_scm_qseecom_call(&desc, &res);
	if (status)
		return status;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
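
/*
 * Example (illustrative only): exchanging an app-specific message with a QSEE
 * app. The request/response layouts are defined by the app; both buffers must
 * come from the SCM TZ memory pool. "req_size", "rsp_size" and the app name
 * are placeholders chosen by the client driver.
 *
 *	struct qcom_tzmem_pool *pool = qcom_scm_get_tzmem_pool();
 *	void *req = qcom_tzmem_alloc(pool, req_size, GFP_KERNEL);
 *	void *rsp = qcom_tzmem_alloc(pool, rsp_size, GFP_KERNEL);
 *	u32 app_id;
 *
 *	if (req && rsp &&
 *	    !qcom_scm_qseecom_app_get_id("qcom.tz.uefisecapp", &app_id))
 *		qcom_scm_qseecom_app_send(app_id, req, req_size,
 *					  rsp, rsp_size);
 */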
/*
 * We do not yet support re-entrant calls via the qseecom interface. To prevent
 * any potential issues with this, only allow validated machines for now.
 */
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
	{ .compatible = "dell,xps13-9345" },
	{ .compatible = "lenovo,flex-5g" },
	{ .compatible = "lenovo,thinkpad-t14s" },
	{ .compatible = "lenovo,thinkpad-x13s", },
	{ .compatible = "lenovo,yoga-slim7x" },
	{ .compatible = "microsoft,arcata", },
	{ .compatible = "microsoft,romulus13", },
	{ .compatible = "microsoft,romulus15", },
	{ .compatible = "qcom,sc8180x-primus" },
	{ .compatible = "qcom,x1e001de-devkit" },
	{ .compatible = "qcom,x1e80100-crd" },
	{ .compatible = "qcom,x1e80100-qcp" },
	{ }
};
static bool qcom_scm_qseecom_machine_is_allowed(void)
{
	struct device_node *np;
	bool match;

	np = of_find_node_by_path("/");
	if (!np)
		return false;

	match = of_match_node(qcom_scm_qseecom_allowlist, np);
	of_node_put(np);

	return match;
}
static void qcom_scm_qseecom_free(void *data)
{
	struct platform_device *qseecom_dev = data;

	platform_device_del(qseecom_dev);
	platform_device_put(qseecom_dev);
}
static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	struct platform_device *qseecom_dev;
	u32 version;
	int ret;

	/*
	 * Note: We do two steps of validation here: First, we try to query the
	 * QSEECOM version as a check to see if the interface exists on this
	 * device. Second, we check against known good devices due to current
	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
	 *
	 * Note that we deliberately do the machine check after the version
	 * check so that we can log potentially supported devices. This should
	 * be safe as downstream sources indicate that the version query is
	 * neither blocking nor reentrant.
	 */
	ret = qcom_scm_qseecom_get_version(&version);
	if (ret)
		return 0;

	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);

	if (!qcom_scm_qseecom_machine_is_allowed()) {
		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
		return 0;
	}

	/*
	 * Set up QSEECOM interface device. All application clients will be
	 * set up and managed by the corresponding driver for it.
	 */
	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
	if (!qseecom_dev)
		return -ENOMEM;

	qseecom_dev->dev.parent = scm->dev;

	ret = platform_device_add(qseecom_dev);
	if (ret) {
		platform_device_put(qseecom_dev);
		return ret;
	}

	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
}

#else /* CONFIG_QCOM_QSEECOM */

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	return 0;
}

#endif /* CONFIG_QCOM_QSEECOM */
/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!READ_ONCE(__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);
static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
{
	/* FW currently only supports a single wq_ctx (zero).
	 * TODO: Update this logic to include dynamic allocation and lookup of
	 * completion structs when FW supports more wq_ctx values.
	 */
	if (wq_ctx != 0) {
		dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
		return -EINVAL;
	}

	return 0;
}
int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	wait_for_completion(&__scm->waitq_comp);

	return 0;
}

static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	complete(&__scm->waitq_comp);

	return 0;
}
static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
{
	int ret;
	struct qcom_scm *scm = data;
	u32 wq_ctx, flags, more_pending = 0;

	do {
		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
		if (ret) {
			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
			goto out;
		}

		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) {
			dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags);
			goto out;
		}

		ret = qcom_scm_waitq_wakeup(wq_ctx);
		if (ret)
			goto out;
	} while (more_pending);

out:
	return IRQ_HANDLED;
}
static int get_download_mode(char *buffer, const struct kernel_param *kp)
{
	if (download_mode >= ARRAY_SIZE(download_mode_name))
		return sysfs_emit(buffer, "unknown mode\n");

	return sysfs_emit(buffer, "%s\n", download_mode_name[download_mode]);
}
static int set_download_mode(const char *val, const struct kernel_param *kp)
{
	bool tmp;
	int ret;

	ret = sysfs_match_string(download_mode_name, val);
	if (ret < 0) {
		ret = kstrtobool(val, &tmp);
		if (ret < 0) {
			pr_err("qcom_scm: err: %d\n", ret);
			return ret;
		}

		ret = tmp ? QCOM_DLOAD_FULLDUMP : QCOM_DLOAD_NODUMP;
	}

	download_mode = ret;
	if (__scm)
		qcom_scm_set_download_mode(download_mode);

	return 0;
}
static const struct kernel_param_ops download_mode_param_ops = {
	.get = get_download_mode,
	.set = set_download_mode,
};

module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644);
MODULE_PARM_DESC(download_mode, "download mode: off/0/N for no dump mode, full/on/1/Y for full dump mode, mini for minidump mode and full,mini for both full and minidump mode together are acceptable values");
static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_tzmem_pool_config pool_config;
	struct qcom_scm *scm;
	int irq, ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	scm->dev = &pdev->dev;
	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	init_completion(&scm->waitq_comp);
	mutex_init(&scm->scm_bw_lock);

	scm->path = devm_of_icc_get(&pdev->dev, NULL);
	if (IS_ERR(scm->path))
		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
				     "failed to acquire interconnect path\n");

	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk))
		return PTR_ERR(scm->core_clk);

	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk))
		return PTR_ERR(scm->iface_clk);

	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk))
		return PTR_ERR(scm->bus_clk);

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	/* Let all above stores be available after this */
	smp_store_release(&__scm, scm);

	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0) {
		if (irq != -ENXIO)
			return irq;
	} else {
		ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
						IRQF_ONESHOT, "qcom-scm", __scm);
		if (ret < 0)
			return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
	}

	/*
	 * If "download mode" is requested, from this point on warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(download_mode);

	/*
	 * Disable SDI if indicated by DT that it is enabled by default.
	 */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode)
		qcom_scm_disable_sdi();

	ret = of_reserved_mem_device_init(__scm->dev);
	if (ret && ret != -ENODEV)
		return dev_err_probe(__scm->dev, ret,
				     "Failed to setup the reserved memory region for TZ mem\n");

	ret = qcom_tzmem_enable(__scm->dev);
	if (ret)
		return dev_err_probe(__scm->dev, ret,
				     "Failed to enable the TrustZone memory allocator\n");

	memset(&pool_config, 0, sizeof(pool_config));
	pool_config.initial_size = 0;
	pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
	pool_config.max_size = SZ_256K;

	__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
	if (IS_ERR(__scm->mempool))
		return dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
				     "Failed to create the SCM memory pool\n");

	/*
	 * Initialize the QSEECOM interface.
	 *
	 * Note: QSEECOM is fairly self-contained and this only adds the
	 * interface device (the driver of which does most of the heavy
	 * lifting). So any errors returned here should be either -ENOMEM or
	 * -EINVAL (with the latter only in case there's a bug in our code).
	 * This means that there is no need to bring down the whole SCM driver.
	 * Just log the error instead and let SCM live.
	 */
	ret = qcom_scm_qseecom_init(scm);
	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);

	return 0;
}
static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP);
}
static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm" },

	/* Legacy entries kept for backwards compatibility */
	{ .compatible = "qcom,scm-apq8064" },
	{ .compatible = "qcom,scm-apq8084" },
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8953" },
	{ .compatible = "qcom,scm-msm8974" },
	{ .compatible = "qcom,scm-msm8996" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");