// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include "qcom_scm.h"
#include "qcom_tzmem.h"
static u32 download_mode;

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct icc_path *path;
	struct completion waitq_comp;
	struct reset_controller_dev reset;

	/* control access to the interconnect path */
	struct mutex scm_bw_lock;
	int scm_vote_count;

	u64 dload_mode_addr;

	struct qcom_tzmem_pool *mempool;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};
/**
 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
 * @result:    Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
 * @data:      Response data. The type of this data is given in @resp_type.
 */
struct qcom_scm_qseecom_resp {
	u64 result;
	u64 resp_type;
	u64 data;
};

enum qcom_scm_qseecom_result {
	QSEECOM_RESULT_SUCCESS			= 0,
	QSEECOM_RESULT_INCOMPLETE		= 1,
	QSEECOM_RESULT_BLOCKED_ON_LISTENER	= 2,
	QSEECOM_RESULT_FAILURE			= 0xFFFFFFFF,
};

enum qcom_scm_qseecom_resp_type {
	QSEECOM_SCM_RES_APP_ID			= 0xEE01,
	QSEECOM_SCM_RES_QSEOS_LISTENER_ID	= 0xEE02,
};

enum qcom_scm_qseecom_tz_owner {
	QSEECOM_TZ_OWNER_SIP		= 2,
	QSEECOM_TZ_OWNER_TZ_APPS	= 48,
	QSEECOM_TZ_OWNER_QSEE_OS	= 50
};

enum qcom_scm_qseecom_tz_svc {
	QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER	= 0,
	QSEECOM_TZ_SVC_APP_MGR			= 1,
	QSEECOM_TZ_SVC_INFO			= 6,
};

enum qcom_scm_qseecom_tz_cmd_app {
	QSEECOM_TZ_CMD_APP_SEND		= 1,
	QSEECOM_TZ_CMD_APP_LOOKUP	= 3,
};

enum qcom_scm_qseecom_tz_cmd_info {
	QSEECOM_TZ_CMD_INFO_VERSION	= 3,
};

#define QSEECOM_MAX_APP_NAME_SIZE	64
/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};

static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE	BIT(0)

#define QCOM_DLOAD_MASK			GENMASK(5, 4)
#define QCOM_DLOAD_NODUMP		0
#define QCOM_DLOAD_FULLDUMP		1
#define QCOM_DLOAD_MINIDUMP		2
#define QCOM_DLOAD_BOTHDUMP		3

static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static const char * const download_mode_name[] = {
	[QCOM_DLOAD_NODUMP]	= "off",
	[QCOM_DLOAD_FULLDUMP]	= "full",
	[QCOM_DLOAD_MINIDUMP]	= "mini",
	[QCOM_DLOAD_BOTHDUMP]	= "full,mini",
};

static struct qcom_scm *__scm;
static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}
static int qcom_scm_bw_enable(void)
{
	int ret = 0;

	if (!__scm->path)
		return 0;

	mutex_lock(&__scm->scm_bw_lock);
	if (!__scm->scm_vote_count) {
		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
		if (ret < 0) {
			dev_err(__scm->dev, "failed to set bandwidth request\n");
			goto err_bw;
		}
	}
	__scm->scm_vote_count++;
err_bw:
	mutex_unlock(&__scm->scm_bw_lock);

	return ret;
}

static void qcom_scm_bw_disable(void)
{
	if (!__scm->path)
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}
enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
	return __scm->mempool;
}
static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention can only be used when the client is 64-bit, otherwise
	 * the system will encounter undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing ARM_64
	 * calling conventions on these firmwares. Luckily we don't make any
	 * early calls into the firmware on these SoCs so the device pointer
	 * will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}
/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}
/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}
static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
			/* Apply to all CPUs in all affinity levels */
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);
/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down the cpu. If there was a pending
 * interrupt, control returns from this function; otherwise the cpu jumps to
 * the warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);
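
/*
 * Illustrative usage sketch (not part of this driver): a cpuidle or CPU
 * hotplug path would typically register the warm boot entry point once and
 * then terminate the core. example_cpu_sleep() is a hypothetical caller;
 * cpu_resume and QCOM_SCM_CPU_PWR_DOWN_L2_ON are assumed from arch/firmware
 * headers.
 *
 *	static int example_cpu_sleep(void)
 *	{
 *		int ret;
 *
 *		ret = qcom_scm_set_warm_boot_addr(cpu_resume);
 *		if (ret)
 *			return ret;
 *
 *		// Does not return here if the core is actually powered down.
 *		qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
 *		return 0;
 *	}
 */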
int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);
static int qcom_scm_disable_sdi(void)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SDI_CONFIG,
		.args[0] = 1, /* Disable watchdog debug */
		.args[1] = 0, /* Disable SDI */
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;
	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val)
{
	unsigned int old;
	unsigned int new;
	int ret;

	ret = qcom_scm_io_readl(addr, &old);
	if (ret)
		return ret;

	new = (old & ~mask) | (val & mask);

	return qcom_scm_io_writel(addr, new);
}
static void qcom_scm_set_download_mode(u32 dload_mode)
{
	int ret = 0;

	if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
				      FIELD_PREP(QCOM_DLOAD_MASK, dload_mode));
	} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
						QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}
/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 * @ctx:	optional metadata context
 *
 * Return: 0 on success.
 *
 * Upon successful return, the PAS metadata context (@ctx) will be used to
 * track the metadata allocation; it must be released by the caller via
 * qcom_scm_pas_metadata_release().
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
			    struct qcom_scm_pas_metadata *ctx)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the scm call memory protection will be enabled for the meta
	 * data blob, so make sure it's physically contiguous, 4K aligned and
	 * non-cachable to avoid XPU violations.
	 *
	 * For PIL calls the hypervisor creates SHM Bridges for the blob
	 * buffers on behalf of Linux, so we must not do it ourselves; hence
	 * we are not using the TZMem allocator here.
	 *
	 * If we pass a buffer that is already part of an SHM Bridge to this
	 * call, it will fail.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf)
		return -ENOMEM;

	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto out;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

out:
	if (ret < 0 || !ctx) {
		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else if (ctx) {
		ctx->ptr = mdata_buf;
		ctx->phys = mdata_phys;
		ctx->size = size;
	}

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);
/**
 * qcom_scm_pas_metadata_release() - release metadata context
 * @ctx:	metadata context
 */
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
{
	if (!ctx->ptr)
		return;

	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);

	ctx->ptr = NULL;
	ctx->phys = 0;
	ctx->size = 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);
/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral:	peripheral id
 * @addr:	start address of memory area to prepare
 * @size:	size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);
/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral:	peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);
/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral:	peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);
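
/*
 * Illustrative PAS lifecycle sketch (not part of this driver): a remoteproc
 * driver would typically drive the calls above in this order.
 * example_rproc_boot() and PERIPHERAL_ID are hypothetical placeholders; the
 * metadata blob is normally extracted from the firmware ELF by the caller.
 *
 *	static int example_rproc_boot(const void *mdata, size_t mdata_size,
 *				      phys_addr_t mem, size_t mem_size)
 *	{
 *		struct qcom_scm_pas_metadata ctx = {};
 *		int ret;
 *
 *		ret = qcom_scm_pas_init_image(PERIPHERAL_ID, mdata,
 *					      mdata_size, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = qcom_scm_pas_mem_setup(PERIPHERAL_ID, mem, mem_size);
 *		if (ret)
 *			goto release;
 *
 *		// ...load the firmware segments into mem here...
 *
 *		ret = qcom_scm_pas_auth_and_reset(PERIPHERAL_ID);
 *	release:
 *		qcom_scm_pas_metadata_release(&ctx);
 *		return ret;
 *	}
 *
 * Stopping the remote processor later mirrors this with
 * qcom_scm_pas_shutdown(PERIPHERAL_ID).
 */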
/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral:	peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);
static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
					unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);
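
/*
 * Illustrative read-modify-write sketch (not part of this driver): the same
 * pattern qcom_scm_io_rmw() uses internally, shown with a hypothetical
 * secure register address EXAMPLE_TCSR_REG and field mask EXAMPLE_MASK.
 *
 *	static int example_update_field(void)
 *	{
 *		unsigned int v;
 *		int ret;
 *
 *		ret = qcom_scm_io_readl(EXAMPLE_TCSR_REG, &v);
 *		if (ret)
 *			return ret;
 *
 *		v = (v & ~EXAMPLE_MASK) | FIELD_PREP(EXAMPLE_MASK, 2);
 *		return qcom_scm_io_writel(EXAMPLE_TCSR_REG, v);
 *	}
 */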
/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 *					  supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);
int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);
int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);
int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = size,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);
int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);
static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}
/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr:	mem region whose ownership needs to be reassigned
 * @mem_sz:	size of the region.
 * @srcvm:	vmid for the current set of owners, each set bit in
 *		flag indicates a unique owner
 * @newvm:	array having new owners and corresponding permission
 *		flags
 * @dest_cnt:	number of owners in next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			u64 *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	phys_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	int ret, i, b;
	u64 srcvm_bits = *srcvm;

	src_sz = hweight64(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							ptr_sz, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ptr_phys = qcom_tzmem_to_phys(ptr);

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
		if (srcvm_bits & BIT(b))
			src[i++] = cpu_to_le32(b);
	}

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
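
/*
 * Illustrative usage sketch (not part of this driver): hand a physically
 * contiguous buffer from HLOS to a remote VM. The QCOM_SCM_VMID_* and
 * QCOM_SCM_PERM_* constants come from the public qcom_scm.h header; the
 * buffer itself (buf_phys, buf_size) is a hypothetical placeholder.
 *
 *	u64 perms = BIT(QCOM_SCM_VMID_HLOS);
 *	struct qcom_scm_vmperm newvm = {
 *		.vmid = QCOM_SCM_VMID_MSS_MSA,
 *		.perm = QCOM_SCM_PERM_RW,
 *	};
 *	int ret;
 *
 *	ret = qcom_scm_assign_mem(buf_phys, buf_size, &perms, &newvm, 1);
 *	// On success, perms now describes the new owner set and must be
 *	// passed back in later to return ownership to HLOS.
 */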
/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 *			   region to the specified initiator
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 *			     region from the specified initiator
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);
/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);
/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext. Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	int ret;

	void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							   key_size,
							   GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = qcom_tzmem_to_phys(keybuf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
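
/*
 * Illustrative usage sketch (not part of this driver): program a raw
 * AES-256-XTS key (64 bytes) for 4096-byte crypto data units into keyslot 0,
 * then invalidate it when done. raw_key is a hypothetical buffer; the cipher
 * enum value is from the public qcom_scm.h header.
 *
 *	if (qcom_scm_ice_available()) {
 *		ret = qcom_scm_ice_set_key(0, raw_key, 64,
 *					   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *		// ...I/O encrypted with keyslot 0 happens here...
 *		ret = qcom_scm_ice_invalidate_key(0);
 *	}
 */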
/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return ret;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
					     QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);
int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = sec_id,
		.args[1] = ctx_num,
		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);
bool qcom_scm_lmh_dcvsh_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);

int qcom_scm_shm_bridge_enable(void)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					  QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, &res) ?: res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);
int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags,
			       u64 ipfn_and_s_perm_flags, u64 size_and_flags,
			       u64 ns_vmids, u64 *handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = pfn_and_ns_perm_flags,
		.args[1] = ipfn_and_s_perm_flags,
		.args[2] = size_and_flags,
		.args[3] = ns_vmids,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
	};

	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (handle && !ret)
		*handle = res.result[1];

	return ret ?: res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);

int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = handle,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete);
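
/*
 * Illustrative SHM bridge sketch (not part of this driver): most callers
 * should use the qcom_tzmem allocator, which manages bridges internally,
 * rather than calling these directly. The packed argument values below are
 * hypothetical placeholders for the firmware's pfn/perm/size encodings.
 *
 *	u64 handle;
 *	int ret;
 *
 *	ret = qcom_scm_shm_bridge_enable();
 *	if (ret)
 *		return ret;
 *
 *	ret = qcom_scm_shm_bridge_create(dev, pfn_and_ns_perm, ipfn_and_s_perm,
 *					 size_and_flags, ns_vmids, &handle);
 *	// ...share the bridged region with the secure world here...
 *	qcom_scm_shm_bridge_delete(dev, handle);
 */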
int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);
int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       payload_size,
							       GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = qcom_tzmem_to_phys(payload_buf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);
int qcom_scm_gpu_init_regs(u32 gpu_req)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_GPU,
		.cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = gpu_req,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}
#ifdef CONFIG_QCOM_QSEECOM

/* Lock for QSEECOM SCM call executions */
static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);

static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				   struct qcom_scm_qseecom_resp *res)
{
	struct qcom_scm_res scm_res = {};
	int status;

	/*
	 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
	 * require the respective call lock to be held.
	 */
	lockdep_assert_held(&qcom_scm_qseecom_call_lock);

	status = qcom_scm_call(__scm->dev, desc, &scm_res);

	res->result = scm_res.result[0];
	res->resp_type = scm_res.result[1];
	res->data = scm_res.result[2];

	if (status)
		return status;

	return 0;
}
/**
 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
 * @desc: SCM call descriptor.
 * @res:  SCM call response (output).
 *
 * Performs the QSEECOM SCM call described by @desc, returning the response in
 * @res.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				 struct qcom_scm_qseecom_resp *res)
{
	int status;

	/*
	 * Note: Multiple QSEECOM SCM calls should not be executed at the same
	 * time, so lock things here. This needs to be extended to
	 * callback/listener handling when support for that is implemented.
	 */

	mutex_lock(&qcom_scm_qseecom_call_lock);
	status = __qcom_scm_qseecom_call(desc, res);
	mutex_unlock(&qcom_scm_qseecom_call_lock);

	dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
		__func__, desc->owner, desc->svc, desc->cmd, res->result,
		res->resp_type, res->data);

	if (status) {
		dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
		return status;
	}

	/*
	 * TODO: Handle incomplete and blocked calls:
	 *
	 * Incomplete and blocked calls are not supported yet. Some devices
	 * and/or commands require those, some don't. Let's warn about them
	 * prominently in case someone attempts to try these commands with a
	 * device/command combination that isn't supported yet.
	 */
	WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
	WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);

	return 0;
}
/**
 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
 * @version: Pointer where the QSEECOM version will be stored.
 *
 * Performs the QSEECOM SCM call querying the QSEECOM version currently
 * running in the TrustZone.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_get_version(u32 *version)
{
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	u32 feature = 10;
	int ret;

	desc.owner = QSEECOM_TZ_OWNER_SIP;
	desc.svc = QSEECOM_TZ_SVC_INFO;
	desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
	desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
	desc.args[0] = feature;

	ret = qcom_scm_qseecom_call(&desc, &res);
	if (ret)
		return ret;

	*version = res.result;
	return 0;
}
/**
 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
 * @app_name: The name of the app.
 * @app_id:   The returned app ID.
 *
 * Query and return the application ID of the QSEE app identified by the given
 * name. The returned ID is the unique identifier of the app required for
 * subsequent communication.
 *
 * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
 * loaded or could not be found.
 */
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
{
	unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
	unsigned long app_name_len = strlen(app_name);
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	int status;

	if (app_name_len >= name_buf_size)
		return -EINVAL;

	char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							     name_buf_size,
							     GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	memcpy(name_buf, app_name, app_name_len);

	desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
	desc.svc = QSEECOM_TZ_SVC_APP_MGR;
	desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = qcom_tzmem_to_phys(name_buf);
	desc.args[1] = app_name_len;

	status = qcom_scm_qseecom_call(&desc, &res);

	if (status)
		return status;

	if (res.result == QSEECOM_RESULT_FAILURE)
		return -ENOENT;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EINVAL;

	if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
		return -EINVAL;

	*app_id = res.data;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
/**
 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
 * @app_id:   The ID of the target app.
 * @req:      Request buffer sent to the app (must be TZ memory).
 * @req_size: Size of the request buffer.
 * @rsp:      Response buffer, written to by the app (must be TZ memory).
 * @rsp_size: Size of the response buffer.
 *
 * Sends a request to the QSEE app associated with the given ID and reads back
 * its response. The caller must provide two DMA memory regions, one for the
 * request and one for the response, and fill out the @req region with the
 * respective (app-specific) request data. The QSEE app reads this and returns
 * its response in the @rsp region.
 *
 * Return: Zero on success, nonzero on failure.
 */
int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
			      void *rsp, size_t rsp_size)
{
	struct qcom_scm_qseecom_resp res = {};
	struct qcom_scm_desc desc = {};
	phys_addr_t req_phys;
	phys_addr_t rsp_phys;
	int status;

	req_phys = qcom_tzmem_to_phys(req);
	rsp_phys = qcom_tzmem_to_phys(rsp);

	desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
	desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
	desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
	desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = app_id;
	desc.args[1] = req_phys;
	desc.args[2] = req_size;
	desc.args[3] = rsp_phys;
	desc.args[4] = rsp_size;

	status = qcom_scm_qseecom_call(&desc, &res);

	if (status)
		return status;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
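
/*
 * Illustrative usage sketch (not part of this driver): look up an app by
 * name and send it a request. The app name, request layout and sizes are
 * hypothetical; both buffers must come from the SCM TZ memory pool.
 *
 *	struct qcom_tzmem_pool *pool = qcom_scm_get_tzmem_pool();
 *	u32 app_id;
 *	int ret;
 *
 *	ret = qcom_scm_qseecom_app_get_id("example_app", &app_id);
 *	if (ret)
 *		return ret;
 *
 *	void *req __free(qcom_tzmem) = qcom_tzmem_alloc(pool, req_size,
 *							GFP_KERNEL);
 *	void *rsp __free(qcom_tzmem) = qcom_tzmem_alloc(pool, rsp_size,
 *							GFP_KERNEL);
 *	if (!req || !rsp)
 *		return -ENOMEM;
 *
 *	// ...fill out the app-specific request in req...
 *	ret = qcom_scm_qseecom_app_send(app_id, req, req_size, rsp, rsp_size);
 */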
/*
 * We do not yet support re-entrant calls via the qseecom interface. To prevent
 * any potential issues with this, only allow validated machines for now.
 */
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
	{ .compatible = "lenovo,flex-5g" },
	{ .compatible = "lenovo,thinkpad-t14s" },
	{ .compatible = "lenovo,thinkpad-x13s", },
	{ .compatible = "microsoft,romulus13", },
	{ .compatible = "microsoft,romulus15", },
	{ .compatible = "qcom,sc8180x-primus" },
	{ .compatible = "qcom,x1e80100-crd" },
	{ .compatible = "qcom,x1e80100-qcp" },
	{ }
};
static bool qcom_scm_qseecom_machine_is_allowed(void)
{
	struct device_node *np;
	bool match;

	np = of_find_node_by_path("/");
	if (!np)
		return false;

	match = of_match_node(qcom_scm_qseecom_allowlist, np);
	of_node_put(np);

	return match;
}

static void qcom_scm_qseecom_free(void *data)
{
	struct platform_device *qseecom_dev = data;

	platform_device_del(qseecom_dev);
	platform_device_put(qseecom_dev);
}
static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	struct platform_device *qseecom_dev;
	u32 version;
	int ret;

	/*
	 * Note: We do two steps of validation here: First, we try to query the
	 * QSEECOM version as a check to see if the interface exists on this
	 * device. Second, we check against known good devices due to current
	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
	 *
	 * Note that we deliberately do the machine check after the version
	 * check so that we can log potentially supported devices. This should
	 * be safe as downstream sources indicate that the version query is
	 * neither blocking nor reentrant.
	 */
	ret = qcom_scm_qseecom_get_version(&version);
	if (ret)
		return 0;

	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);

	if (!qcom_scm_qseecom_machine_is_allowed()) {
		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
		return 0;
	}

	/*
	 * Set up QSEECOM interface device. All application clients will be
	 * set up and managed by the corresponding driver for it.
	 */
	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
	if (!qseecom_dev)
		return -ENOMEM;

	qseecom_dev->dev.parent = scm->dev;

	ret = platform_device_add(qseecom_dev);
	if (ret) {
		platform_device_put(qseecom_dev);
		return ret;
	}

	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
}

#else /* CONFIG_QCOM_QSEECOM */

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	return 0;
}

#endif /* CONFIG_QCOM_QSEECOM */
/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!READ_ONCE(__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);
static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
{
	/* FW currently only supports a single wq_ctx (zero).
	 * TODO: Update this logic to include dynamic allocation and lookup of
	 * completion structs when FW supports more wq_ctx values.
	 */
	if (wq_ctx != 0) {
		dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
		return -EINVAL;
	}

	return 0;
}

int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	wait_for_completion(&__scm->waitq_comp);

	return 0;
}

static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	complete(&__scm->waitq_comp);

	return 0;
}
static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
{
	int ret;
	struct qcom_scm *scm = data;
	u32 wq_ctx, flags, more_pending = 0;

	do {
		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
		if (ret) {
			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
			goto out;
		}

		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) {
			dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags);
			goto out;
		}

		ret = qcom_scm_waitq_wakeup(wq_ctx);
		if (ret)
			goto out;
	} while (more_pending);

out:
	return IRQ_HANDLED;
}
static int get_download_mode(char *buffer, const struct kernel_param *kp)
{
	if (download_mode >= ARRAY_SIZE(download_mode_name))
		return sysfs_emit(buffer, "unknown mode\n");

	return sysfs_emit(buffer, "%s\n", download_mode_name[download_mode]);
}

static int set_download_mode(const char *val, const struct kernel_param *kp)
{
	bool tmp;
	int ret;

	ret = sysfs_match_string(download_mode_name, val);
	if (ret < 0) {
		ret = kstrtobool(val, &tmp);
		if (ret < 0) {
			pr_err("qcom_scm: err: %d\n", ret);
			return ret;
		}

		ret = tmp ? 1 : 0;
	}

	download_mode = ret;
	if (__scm)
		qcom_scm_set_download_mode(download_mode);

	return 0;
}

static const struct kernel_param_ops download_mode_param_ops = {
	.get = get_download_mode,
	.set = set_download_mode,
};

module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644);
MODULE_PARM_DESC(download_mode, "download mode: off/0/N for no dump mode, full/on/1/Y for full dump mode, mini for minidump mode and full,mini for both full and minidump mode together are acceptable values");
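
/*
 * Illustrative usage (not part of this driver): as a standard module
 * parameter exposed with mode 0644, download_mode can be set on the kernel
 * command line or changed at runtime, e.g.:
 *
 *	qcom_scm.download_mode=full                               (command line)
 *	echo mini > /sys/module/qcom_scm/parameters/download_mode (runtime)
 */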
static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_tzmem_pool_config pool_config;
	struct qcom_scm *scm;
	int irq, ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	scm->dev = &pdev->dev;
	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	init_completion(&scm->waitq_comp);
	mutex_init(&scm->scm_bw_lock);

	scm->path = devm_of_icc_get(&pdev->dev, NULL);
	if (IS_ERR(scm->path))
		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
				     "failed to acquire interconnect path\n");

	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk))
		return PTR_ERR(scm->core_clk);

	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk))
		return PTR_ERR(scm->iface_clk);

	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk))
		return PTR_ERR(scm->bus_clk);

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	/* Let all above stores be available after this */
	smp_store_release(&__scm, scm);

	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0) {
		if (irq != -ENXIO)
			return irq;
	} else {
		ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
						IRQF_ONESHOT, "qcom-scm", __scm);
		if (ret < 0)
			return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
	}

	__get_convention();

	/*
	 * If "download mode" is requested, from this point on warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	qcom_scm_set_download_mode(download_mode);

	/*
	 * Disable SDI if indicated by DT that it is enabled by default.
	 */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode)
		qcom_scm_disable_sdi();

	ret = of_reserved_mem_device_init(__scm->dev);
	if (ret && ret != -ENODEV)
		return dev_err_probe(__scm->dev, ret,
				     "Failed to setup the reserved memory region for TZ mem\n");

	ret = qcom_tzmem_enable(__scm->dev);
	if (ret)
		return dev_err_probe(__scm->dev, ret,
				     "Failed to enable the TrustZone memory allocator\n");

	memset(&pool_config, 0, sizeof(pool_config));
	pool_config.initial_size = 0;
	pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
	pool_config.max_size = SZ_256K;

	__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
	if (IS_ERR(__scm->mempool))
		return dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
				     "Failed to create the SCM memory pool\n");

	/*
	 * Initialize the QSEECOM interface.
	 *
	 * Note: QSEECOM is fairly self-contained and this only adds the
	 * interface device (the driver of which does most of the heavy
	 * lifting). So any errors returned here should be either -ENOMEM or
	 * -EINVAL (with the latter only in case there's a bug in our code).
	 * This means that there is no need to bring down the whole SCM driver.
	 * Just log the error instead and let SCM live.
	 */
	ret = qcom_scm_qseecom_init(scm);
	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);

	return 0;
}
static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm" },

	/* Legacy entries kept for backwards compatibility */
	{ .compatible = "qcom,scm-apq8064" },
	{ .compatible = "qcom,scm-apq8084" },
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8953" },
	{ .compatible = "qcom,scm-msm8974" },
	{ .compatible = "qcom,scm-msm8996" },
	{ }
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");