// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>

#include "qcom_scm.h"
static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);
#define SCM_HAS_CORE_CLK	BIT(0)
#define SCM_HAS_IFACE_CLK	BIT(1)
#define SCM_HAS_BUS_CLK		BIT(2)
struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct reset_controller_dev reset;

	u64 dload_mode_addr;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

static struct qcom_scm *__scm;
static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}
static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}
/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range will be removed from the cpu present mask.
 */
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	return __qcom_scm_set_cold_boot_addr(entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
	return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
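
/*
 * Illustrative use from a cpuidle/hotplug path (a sketch, not taken from
 * this file): point the secure monitor at the kernel's resume handler so
 * a powered-down core re-enters Linux on wakeup.
 *
 *	qcom_scm_set_warm_boot_addr(cpu_resume_arm, cpumask_of(cpu));
 *
 * cpu_resume_arm is the generic ARM resume entry; callers may substitute
 * a platform-specific entry point.
 */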
/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	__qcom_scm_cpu_power_down(flags);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);
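
/*
 * A minimal caller sketch (assumes the QCOM_SCM_CPU_PWR_DOWN_L2_ON flag
 * from <linux/qcom_scm.h>): request power-down with L2 retained, after
 * the warm boot address has been set for this cpu.
 *
 *	qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
 */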
/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
					   QCOM_SCM_CMD_HDCP);

	qcom_scm_clk_disable();

	return ret > 0;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);
/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret = qcom_scm_clk_enable();

	if (ret)
		return ret;

	ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);
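
/*
 * Caller sketch (HDCP_REG_ADDR and HDCP_REG_VAL are hypothetical values
 * for illustration): probe for HDCP support, then write a batch of
 * registers in one SCM round trip.
 *
 *	struct qcom_scm_hdcp_req req[] = {
 *		{ .addr = HDCP_REG_ADDR, .val = HDCP_REG_VAL },
 *	};
 *	u32 resp;
 *
 *	if (qcom_scm_hdcp_available())
 *		ret = qcom_scm_hdcp_req(req, ARRAY_SIZE(req), &resp);
 */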
/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral:	peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;

	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					   QCOM_SCM_PAS_IS_SUPPORTED_CMD);
	if (ret <= 0)
		return false;

	return __qcom_scm_pas_supported(__scm->dev, peripheral);
}
EXPORT_SYMBOL(qcom_scm_pas_supported);
/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_OCMEM_SVC,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	return __qcom_scm_ocmem_lock(__scm->dev, id, offset, size, mode);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock);
/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	return __qcom_scm_ocmem_unlock(__scm->dev, id, offset, size);
}
EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
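
/*
 * Illustrative lock/unlock pairing (QCOM_SCM_OCMEM_GRAPHICS_ID is assumed
 * to come from the qcom_scm_ocmem_client enum in <linux/qcom_scm.h>;
 * error handling elided):
 *
 *	if (qcom_scm_ocmem_lock_available())
 *		ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID,
 *					  offset, size, mode);
 *	...
 *	qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset, size);
 */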
/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;

	/*
	 * During the scm call memory protection will be enabled for the meta
	 * data blob, so make sure it's physically contiguous, 4K aligned and
	 * non-cacheable to avoid XPU violations.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto free_metadata;

	ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys);

	qcom_scm_clk_disable();

free_metadata:
	dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);
/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral:	peripheral id
 * @addr:	start address of memory area to prepare
 * @size:	size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_shutdown);
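
/*
 * Typical remoteproc boot sequence built on the PAS calls above (a sketch;
 * the real flow lives in the qcom remoteproc drivers and includes error
 * handling omitted here):
 *
 *	if (!qcom_scm_pas_supported(pas_id))
 *		return -ENXIO;
 *
 *	ret = qcom_scm_pas_init_image(pas_id, fw_metadata, metadata_size);
 *	ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
 *	... load firmware segments into the prepared region ...
 *	ret = qcom_scm_pas_auth_and_reset(pas_id);
 *
 * qcom_scm_pas_shutdown(pas_id) tears the peripheral back down.
 */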
static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};
/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);
int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	return __qcom_scm_iommu_secure_ptbl_size(__scm->dev, spare, size);
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	return __qcom_scm_iommu_secure_ptbl_init(__scm->dev, addr, size, spare);
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	return __qcom_scm_qsmmu500_wait_safe_toggle(__scm->dev, en);
}
EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	return __qcom_scm_io_readl(__scm->dev, addr, val);
}
EXPORT_SYMBOL(qcom_scm_io_readl);
int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	return __qcom_scm_io_writel(__scm->dev, addr, val);
}
EXPORT_SYMBOL(qcom_scm_io_writel);
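
/*
 * Minimal sketch of the secure I/O accessors (TCSR_REG is a hypothetical
 * secured register address): read, modify, write through the monitor.
 *
 *	unsigned int val;
 *
 *	ret = qcom_scm_io_readl(TCSR_REG, &val);
 *	if (!ret)
 *		ret = qcom_scm_io_writel(TCSR_REG, val | BIT(0));
 */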
static void qcom_scm_set_download_mode(bool enable)
{
	bool avail;
	int ret = 0;

	avail = __qcom_scm_is_call_available(__scm->dev,
					     QCOM_SCM_SVC_BOOT,
					     QCOM_SCM_SET_DLOAD_MODE);
	if (avail) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else if (__scm->dload_mode_addr) {
		ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr,
					   enable ? QCOM_SCM_SET_DLOAD_MODE : 0);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}
/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!__scm;
}
EXPORT_SYMBOL(qcom_scm_is_available);
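
/*
 * Consumers are expected to gate their probe on this, e.g. (a sketch):
 *
 *	if (!qcom_scm_is_available())
 *		return -EPROBE_DEFER;
 */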
int qcom_scm_set_remote_state(u32 state, u32 id)
{
	return __qcom_scm_set_remote_state(__scm->dev, state, id);
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);
/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid for the current set of owners, each set bit in
 *            the flag indicates a unique owner
 * @newvm:    array having new owners and corresponding permission
 *            flags
 * @dest_cnt: number of owners in next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			unsigned int *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	phys_addr_t ptr_phys;
	dma_addr_t ptr_dma;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret, i, b;
	unsigned long srcvm_bits = *srcvm;

	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;
	ptr_phys = dma_to_phys(__scm->dev, ptr_dma);

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
		src[i++] = cpu_to_le32(b);

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;

	return 0;
}
EXPORT_SYMBOL(qcom_scm_assign_mem);
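
/*
 * Example of handing a buffer to the modem (a sketch; the VMID and
 * permission constants come from <linux/qcom_scm.h>, error handling
 * elided):
 *
 *	struct qcom_scm_vmperm perms[] = {
 *		{ QCOM_SCM_VMID_MSS_MSA, QCOM_SCM_PERM_RW },
 *	};
 *	unsigned int srcvm = BIT(QCOM_SCM_VMID_HLOS);
 *
 *	ret = qcom_scm_assign_mem(phys, size, &srcvm, perms,
 *				  ARRAY_SIZE(perms));
 *
 * On success srcvm is updated to the new owner set and must be passed
 * back unchanged when reassigning the region to HLOS.
 */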
static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	unsigned long clks;
	int ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	clks = (unsigned long)of_device_get_match_data(&pdev->dev);

	scm->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk)) {
		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->core_clk);

		if (clks & SCM_HAS_CORE_CLK) {
			dev_err(&pdev->dev, "failed to acquire core clk\n");
			return PTR_ERR(scm->core_clk);
		}

		scm->core_clk = NULL;
	}

	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk)) {
		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->iface_clk);

		if (clks & SCM_HAS_IFACE_CLK) {
			dev_err(&pdev->dev, "failed to acquire iface clk\n");
			return PTR_ERR(scm->iface_clk);
		}

		scm->iface_clk = NULL;
	}

	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk)) {
		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->bus_clk);

		if (clks & SCM_HAS_BUS_CLK) {
			dev_err(&pdev->dev, "failed to acquire bus clk\n");
			return PTR_ERR(scm->bus_clk);
		}

		scm->bus_clk = NULL;
	}

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	__scm = scm;
	__scm->dev = &pdev->dev;

	__qcom_scm_init();

	/*
	 * If requested, enable "download mode": from this point on, a warm
	 * boot will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);

	return 0;
}
static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	if (download_mode)
		qcom_scm_set_download_mode(false);
}
static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm-apq8064",
	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
	},
	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8996" },
	{ .compatible = "qcom,scm" },
	{}
};
static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};
static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);