4 * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
5 * Copyright (C) 2015 Linaro Ltd.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 and
9 * only version 2 as published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
17 #include <linux/platform_device.h>
18 #include <linux/init.h>
19 #include <linux/cpumask.h>
20 #include <linux/export.h>
21 #include <linux/dma-direct.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/qcom_scm.h>
27 #include <linux/of_address.h>
28 #include <linux/of_platform.h>
29 #include <linux/clk.h>
30 #include <linux/reset-controller.h>
34 static bool download_mode
= IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT
);
35 module_param(download_mode
, bool, 0);
/* Per-SoC clock requirements, passed as of_device_id.data (see match table). */
37 #define SCM_HAS_CORE_CLK BIT(0)
38 #define SCM_HAS_IFACE_CLK BIT(1)
39 #define SCM_HAS_BUS_CLK BIT(2)
/*
 * NOTE(review): fragment of the driver state structure (struct qcom_scm).
 * The struct's opening line and its other members (dev, core_clk, bus_clk,
 * dload_mode_addr — all referenced later via __scm->) are not visible here.
 */
44 struct clk
*iface_clk
;
46 struct reset_controller_dev reset
;
/*
 * NOTE(review): only the opening lines of these two wire-format structures
 * are visible. From their use in qcom_scm_assign_mem():
 * qcom_scm_current_perm_info carries vmid/perm/ctx_size (little-endian) per
 * destination VM; qcom_scm_mem_map_info carries mem_addr/mem_size of the
 * region being reassigned. Confirm field layout against the full file.
 */
struct qcom_scm_current_perm_info
{
struct qcom_scm_mem_map_info
{
/* Driver-wide singleton; assigned during probe and used by every entry point. */
static struct qcom_scm *__scm;
66 static int qcom_scm_clk_enable(void)
70 ret
= clk_prepare_enable(__scm
->core_clk
);
74 ret
= clk_prepare_enable(__scm
->iface_clk
);
78 ret
= clk_prepare_enable(__scm
->bus_clk
);
85 clk_disable_unprepare(__scm
->iface_clk
);
87 clk_disable_unprepare(__scm
->core_clk
);
92 static void qcom_scm_clk_disable(void)
94 clk_disable_unprepare(__scm
->core_clk
);
95 clk_disable_unprepare(__scm
->iface_clk
);
96 clk_disable_unprepare(__scm
->bus_clk
);
100 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
101 * @entry: Entry point function for the cpus
102 * @cpus: The cpumask of cpus that will use the entry point
104 * Set the cold boot address of the cpus. Any cpu outside the supported
105 * range would be removed from the cpu present mask.
107 int qcom_scm_set_cold_boot_addr(void *entry
, const cpumask_t
*cpus
)
109 return __qcom_scm_set_cold_boot_addr(entry
, cpus
);
111 EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr
);
114 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
115 * @entry: Entry point function for the cpus
116 * @cpus: The cpumask of cpus that will use the entry point
118 * Set the Linux entry point for the SCM to transfer control to when coming
119 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
121 int qcom_scm_set_warm_boot_addr(void *entry
, const cpumask_t
*cpus
)
123 return __qcom_scm_set_warm_boot_addr(__scm
->dev
, entry
, cpus
);
125 EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr
);
128 * qcom_scm_cpu_power_down() - Power down the cpu
129 * @flags - Flags to flush cache
131 * This is an end point to power down cpu. If there was a pending interrupt,
132 * the control would return from this function, otherwise, the cpu jumps to the
133 * warm boot entry point set for this cpu upon reset.
135 void qcom_scm_cpu_power_down(u32 flags
)
137 __qcom_scm_cpu_power_down(flags
);
139 EXPORT_SYMBOL(qcom_scm_cpu_power_down
);
142 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
144 * Return true if HDCP is supported, false if not.
146 bool qcom_scm_hdcp_available(void)
148 int ret
= qcom_scm_clk_enable();
153 ret
= __qcom_scm_is_call_available(__scm
->dev
, QCOM_SCM_SVC_HDCP
,
156 qcom_scm_clk_disable();
158 return ret
> 0 ? true : false;
160 EXPORT_SYMBOL(qcom_scm_hdcp_available
);
163 * qcom_scm_hdcp_req() - Send HDCP request.
164 * @req: HDCP request array
165 * @req_cnt: HDCP request array count
166 * @resp: response buffer passed to SCM
168 * Write HDCP register(s) through SCM.
170 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req
*req
, u32 req_cnt
, u32
*resp
)
172 int ret
= qcom_scm_clk_enable();
177 ret
= __qcom_scm_hdcp_req(__scm
->dev
, req
, req_cnt
, resp
);
178 qcom_scm_clk_disable();
181 EXPORT_SYMBOL(qcom_scm_hdcp_req
);
184 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
185 * available for the given peripherial
186 * @peripheral: peripheral id
188 * Returns true if PAS is supported for this peripheral, otherwise false.
190 bool qcom_scm_pas_supported(u32 peripheral
)
194 ret
= __qcom_scm_is_call_available(__scm
->dev
, QCOM_SCM_SVC_PIL
,
195 QCOM_SCM_PAS_IS_SUPPORTED_CMD
);
199 return __qcom_scm_pas_supported(__scm
->dev
, peripheral
);
201 EXPORT_SYMBOL(qcom_scm_pas_supported
);
204 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
205 * state machine for a given peripheral, using the
207 * @peripheral: peripheral id
208 * @metadata: pointer to memory containing ELF header, program header table
209 * and optional blob of data used for authenticating the metadata
210 * and the rest of the firmware
211 * @size: size of the metadata
213 * Returns 0 on success.
215 int qcom_scm_pas_init_image(u32 peripheral
, const void *metadata
, size_t size
)
217 dma_addr_t mdata_phys
;
222 * During the scm call memory protection will be enabled for the meta
223 * data blob, so make sure it's physically contiguous, 4K aligned and
224 * non-cachable to avoid XPU violations.
226 mdata_buf
= dma_alloc_coherent(__scm
->dev
, size
, &mdata_phys
,
229 dev_err(__scm
->dev
, "Allocation of metadata buffer failed.\n");
232 memcpy(mdata_buf
, metadata
, size
);
234 ret
= qcom_scm_clk_enable();
238 ret
= __qcom_scm_pas_init_image(__scm
->dev
, peripheral
, mdata_phys
);
240 qcom_scm_clk_disable();
243 dma_free_coherent(__scm
->dev
, size
, mdata_buf
, mdata_phys
);
247 EXPORT_SYMBOL(qcom_scm_pas_init_image
);
250 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
251 * for firmware loading
252 * @peripheral: peripheral id
253 * @addr: start address of memory area to prepare
254 * @size: size of the memory area to prepare
256 * Returns 0 on success.
258 int qcom_scm_pas_mem_setup(u32 peripheral
, phys_addr_t addr
, phys_addr_t size
)
262 ret
= qcom_scm_clk_enable();
266 ret
= __qcom_scm_pas_mem_setup(__scm
->dev
, peripheral
, addr
, size
);
267 qcom_scm_clk_disable();
271 EXPORT_SYMBOL(qcom_scm_pas_mem_setup
);
274 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
275 * and reset the remote processor
276 * @peripheral: peripheral id
278 * Return 0 on success.
280 int qcom_scm_pas_auth_and_reset(u32 peripheral
)
284 ret
= qcom_scm_clk_enable();
288 ret
= __qcom_scm_pas_auth_and_reset(__scm
->dev
, peripheral
);
289 qcom_scm_clk_disable();
293 EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset
);
296 * qcom_scm_pas_shutdown() - Shut down the remote processor
297 * @peripheral: peripheral id
299 * Returns 0 on success.
301 int qcom_scm_pas_shutdown(u32 peripheral
)
305 ret
= qcom_scm_clk_enable();
309 ret
= __qcom_scm_pas_shutdown(__scm
->dev
, peripheral
);
310 qcom_scm_clk_disable();
314 EXPORT_SYMBOL(qcom_scm_pas_shutdown
);
316 static int qcom_scm_pas_reset_assert(struct reset_controller_dev
*rcdev
,
322 return __qcom_scm_pas_mss_reset(__scm
->dev
, 1);
325 static int qcom_scm_pas_reset_deassert(struct reset_controller_dev
*rcdev
,
331 return __qcom_scm_pas_mss_reset(__scm
->dev
, 0);
334 static const struct reset_control_ops qcom_scm_pas_reset_ops
= {
335 .assert = qcom_scm_pas_reset_assert
,
336 .deassert
= qcom_scm_pas_reset_deassert
,
339 int qcom_scm_restore_sec_cfg(u32 device_id
, u32 spare
)
341 return __qcom_scm_restore_sec_cfg(__scm
->dev
, device_id
, spare
);
343 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg
);
345 int qcom_scm_iommu_secure_ptbl_size(u32 spare
, size_t *size
)
347 return __qcom_scm_iommu_secure_ptbl_size(__scm
->dev
, spare
, size
);
349 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size
);
351 int qcom_scm_iommu_secure_ptbl_init(u64 addr
, u32 size
, u32 spare
)
353 return __qcom_scm_iommu_secure_ptbl_init(__scm
->dev
, addr
, size
, spare
);
355 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init
);
357 int qcom_scm_io_readl(phys_addr_t addr
, unsigned int *val
)
359 return __qcom_scm_io_readl(__scm
->dev
, addr
, val
);
361 EXPORT_SYMBOL(qcom_scm_io_readl
);
363 int qcom_scm_io_writel(phys_addr_t addr
, unsigned int val
)
365 return __qcom_scm_io_writel(__scm
->dev
, addr
, val
);
367 EXPORT_SYMBOL(qcom_scm_io_writel
);
369 static void qcom_scm_set_download_mode(bool enable
)
374 avail
= __qcom_scm_is_call_available(__scm
->dev
,
376 QCOM_SCM_SET_DLOAD_MODE
);
378 ret
= __qcom_scm_set_dload_mode(__scm
->dev
, enable
);
379 } else if (__scm
->dload_mode_addr
) {
380 ret
= __qcom_scm_io_writel(__scm
->dev
, __scm
->dload_mode_addr
,
381 enable
? QCOM_SCM_SET_DLOAD_MODE
: 0);
384 "No available mechanism for setting download mode\n");
388 dev_err(__scm
->dev
, "failed to set download mode: %d\n", ret
);
391 static int qcom_scm_find_dload_address(struct device
*dev
, u64
*addr
)
393 struct device_node
*tcsr
;
394 struct device_node
*np
= dev
->of_node
;
399 tcsr
= of_parse_phandle(np
, "qcom,dload-mode", 0);
403 ret
= of_address_to_resource(tcsr
, 0, &res
);
408 ret
= of_property_read_u32_index(np
, "qcom,dload-mode", 1, &offset
);
412 *addr
= res
.start
+ offset
;
418 * qcom_scm_is_available() - Checks if SCM is available
420 bool qcom_scm_is_available(void)
424 EXPORT_SYMBOL(qcom_scm_is_available
);
426 int qcom_scm_set_remote_state(u32 state
, u32 id
)
428 return __qcom_scm_set_remote_state(__scm
->dev
, state
, id
);
430 EXPORT_SYMBOL(qcom_scm_set_remote_state
);
433 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
434 * @mem_addr: mem region whose ownership need to be reassigned
435 * @mem_sz: size of the region.
436 * @srcvm: vmid for current set of owners, each set bit in
437 * flag indicate a unique owner
438 * @newvm: array having new owners and corrsponding permission
440 * @dest_cnt: number of owners in next set.
442 * Return negative errno on failure, 0 on success, with @srcvm updated.
444 int qcom_scm_assign_mem(phys_addr_t mem_addr
, size_t mem_sz
,
446 struct qcom_scm_vmperm
*newvm
, int dest_cnt
)
448 struct qcom_scm_current_perm_info
*destvm
;
449 struct qcom_scm_mem_map_info
*mem_to_map
;
450 phys_addr_t mem_to_map_phys
;
451 phys_addr_t dest_phys
;
452 phys_addr_t ptr_phys
;
454 size_t mem_to_map_sz
;
465 src_sz
= hweight_long(*srcvm
) * sizeof(*src
);
466 mem_to_map_sz
= sizeof(*mem_to_map
);
467 dest_sz
= dest_cnt
* sizeof(*destvm
);
468 ptr_sz
= ALIGN(src_sz
, SZ_64
) + ALIGN(mem_to_map_sz
, SZ_64
) +
469 ALIGN(dest_sz
, SZ_64
);
471 ptr
= dma_alloc_coherent(__scm
->dev
, ptr_sz
, &ptr_dma
, GFP_KERNEL
);
474 ptr_phys
= dma_to_phys(__scm
->dev
, ptr_dma
);
476 /* Fill source vmid detail */
478 len
= hweight_long(*srcvm
);
479 for (i
= 0; i
< len
; i
++) {
480 src
[i
] = cpu_to_le32(ffs(*srcvm
) - 1);
481 *srcvm
^= 1 << (ffs(*srcvm
) - 1);
484 /* Fill details of mem buff to map */
485 mem_to_map
= ptr
+ ALIGN(src_sz
, SZ_64
);
486 mem_to_map_phys
= ptr_phys
+ ALIGN(src_sz
, SZ_64
);
487 mem_to_map
[0].mem_addr
= cpu_to_le64(mem_addr
);
488 mem_to_map
[0].mem_size
= cpu_to_le64(mem_sz
);
491 /* Fill details of next vmid detail */
492 destvm
= ptr
+ ALIGN(mem_to_map_sz
, SZ_64
) + ALIGN(src_sz
, SZ_64
);
493 dest_phys
= ptr_phys
+ ALIGN(mem_to_map_sz
, SZ_64
) + ALIGN(src_sz
, SZ_64
);
494 for (i
= 0; i
< dest_cnt
; i
++) {
495 destvm
[i
].vmid
= cpu_to_le32(newvm
[i
].vmid
);
496 destvm
[i
].perm
= cpu_to_le32(newvm
[i
].perm
);
498 destvm
[i
].ctx_size
= 0;
499 next_vm
|= BIT(newvm
[i
].vmid
);
502 ret
= __qcom_scm_assign_mem(__scm
->dev
, mem_to_map_phys
, mem_to_map_sz
,
503 ptr_phys
, src_sz
, dest_phys
, dest_sz
);
504 dma_free_coherent(__scm
->dev
, ptr_sz
, ptr
, ptr_dma
);
507 "Assign memory protection call failed %d.\n", ret
);
514 EXPORT_SYMBOL(qcom_scm_assign_mem
);
516 static int qcom_scm_probe(struct platform_device
*pdev
)
518 struct qcom_scm
*scm
;
522 scm
= devm_kzalloc(&pdev
->dev
, sizeof(*scm
), GFP_KERNEL
);
526 ret
= qcom_scm_find_dload_address(&pdev
->dev
, &scm
->dload_mode_addr
);
530 clks
= (unsigned long)of_device_get_match_data(&pdev
->dev
);
531 if (clks
& SCM_HAS_CORE_CLK
) {
532 scm
->core_clk
= devm_clk_get(&pdev
->dev
, "core");
533 if (IS_ERR(scm
->core_clk
)) {
534 if (PTR_ERR(scm
->core_clk
) != -EPROBE_DEFER
)
536 "failed to acquire core clk\n");
537 return PTR_ERR(scm
->core_clk
);
541 if (clks
& SCM_HAS_IFACE_CLK
) {
542 scm
->iface_clk
= devm_clk_get(&pdev
->dev
, "iface");
543 if (IS_ERR(scm
->iface_clk
)) {
544 if (PTR_ERR(scm
->iface_clk
) != -EPROBE_DEFER
)
546 "failed to acquire iface clk\n");
547 return PTR_ERR(scm
->iface_clk
);
551 if (clks
& SCM_HAS_BUS_CLK
) {
552 scm
->bus_clk
= devm_clk_get(&pdev
->dev
, "bus");
553 if (IS_ERR(scm
->bus_clk
)) {
554 if (PTR_ERR(scm
->bus_clk
) != -EPROBE_DEFER
)
556 "failed to acquire bus clk\n");
557 return PTR_ERR(scm
->bus_clk
);
561 scm
->reset
.ops
= &qcom_scm_pas_reset_ops
;
562 scm
->reset
.nr_resets
= 1;
563 scm
->reset
.of_node
= pdev
->dev
.of_node
;
564 ret
= devm_reset_controller_register(&pdev
->dev
, &scm
->reset
);
568 /* vote for max clk rate for highest performance */
569 ret
= clk_set_rate(scm
->core_clk
, INT_MAX
);
574 __scm
->dev
= &pdev
->dev
;
579 * If requested enable "download mode", from this point on warmboot
580 * will cause the the boot stages to enter download mode, unless
581 * disabled below by a clean shutdown/reboot.
584 qcom_scm_set_download_mode(true);
589 static void qcom_scm_shutdown(struct platform_device
*pdev
)
591 /* Clean shutdown, disable download mode to allow normal restart */
593 qcom_scm_set_download_mode(false);
596 static const struct of_device_id qcom_scm_dt_match
[] = {
597 { .compatible
= "qcom,scm-apq8064",
598 /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
600 { .compatible
= "qcom,scm-msm8660",
601 .data
= (void *) SCM_HAS_CORE_CLK
,
603 { .compatible
= "qcom,scm-msm8960",
604 .data
= (void *) SCM_HAS_CORE_CLK
,
606 { .compatible
= "qcom,scm-msm8996",
607 .data
= NULL
, /* no clocks */
609 { .compatible
= "qcom,scm-ipq4019",
610 .data
= NULL
, /* no clocks */
612 { .compatible
= "qcom,scm",
613 .data
= (void *)(SCM_HAS_CORE_CLK
620 static struct platform_driver qcom_scm_driver
= {
623 .of_match_table
= qcom_scm_dt_match
,
625 .probe
= qcom_scm_probe
,
626 .shutdown
= qcom_scm_shutdown
,
629 static int __init
qcom_scm_init(void)
631 return platform_driver_register(&qcom_scm_driver
);
633 subsys_initcall(qcom_scm_init
);