/*
 * Qualcomm SCM (Secure Channel Manager) driver
 *
 * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>

#include "qcom_scm.h"
33 static bool download_mode
= IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT
);
34 module_param(download_mode
, bool, 0);
/* Per-SoC clock requirements, carried in of_device_id.data (see dt match table) */
#define SCM_HAS_CORE_CLK	BIT(0)
#define SCM_HAS_IFACE_CLK	BIT(1)
#define SCM_HAS_BUS_CLK		BIT(2)
43 struct clk
*iface_clk
;
45 struct reset_controller_dev reset
;
50 struct qcom_scm_current_perm_info
{
58 struct qcom_scm_mem_map_info
{
/* Singleton set once in qcom_scm_probe(); NULL until the driver binds */
static struct qcom_scm *__scm;
65 static int qcom_scm_clk_enable(void)
69 ret
= clk_prepare_enable(__scm
->core_clk
);
73 ret
= clk_prepare_enable(__scm
->iface_clk
);
77 ret
= clk_prepare_enable(__scm
->bus_clk
);
84 clk_disable_unprepare(__scm
->iface_clk
);
86 clk_disable_unprepare(__scm
->core_clk
);
91 static void qcom_scm_clk_disable(void)
93 clk_disable_unprepare(__scm
->core_clk
);
94 clk_disable_unprepare(__scm
->iface_clk
);
95 clk_disable_unprepare(__scm
->bus_clk
);
99 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
100 * @entry: Entry point function for the cpus
101 * @cpus: The cpumask of cpus that will use the entry point
103 * Set the cold boot address of the cpus. Any cpu outside the supported
104 * range would be removed from the cpu present mask.
106 int qcom_scm_set_cold_boot_addr(void *entry
, const cpumask_t
*cpus
)
108 return __qcom_scm_set_cold_boot_addr(entry
, cpus
);
110 EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr
);
113 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
114 * @entry: Entry point function for the cpus
115 * @cpus: The cpumask of cpus that will use the entry point
117 * Set the Linux entry point for the SCM to transfer control to when coming
118 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
120 int qcom_scm_set_warm_boot_addr(void *entry
, const cpumask_t
*cpus
)
122 return __qcom_scm_set_warm_boot_addr(__scm
->dev
, entry
, cpus
);
124 EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr
);
127 * qcom_scm_cpu_power_down() - Power down the cpu
128 * @flags - Flags to flush cache
130 * This is an end point to power down cpu. If there was a pending interrupt,
131 * the control would return from this function, otherwise, the cpu jumps to the
132 * warm boot entry point set for this cpu upon reset.
134 void qcom_scm_cpu_power_down(u32 flags
)
136 __qcom_scm_cpu_power_down(flags
);
138 EXPORT_SYMBOL(qcom_scm_cpu_power_down
);
141 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
143 * Return true if HDCP is supported, false if not.
145 bool qcom_scm_hdcp_available(void)
147 int ret
= qcom_scm_clk_enable();
152 ret
= __qcom_scm_is_call_available(__scm
->dev
, QCOM_SCM_SVC_HDCP
,
155 qcom_scm_clk_disable();
157 return ret
> 0 ? true : false;
159 EXPORT_SYMBOL(qcom_scm_hdcp_available
);
162 * qcom_scm_hdcp_req() - Send HDCP request.
163 * @req: HDCP request array
164 * @req_cnt: HDCP request array count
165 * @resp: response buffer passed to SCM
167 * Write HDCP register(s) through SCM.
169 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req
*req
, u32 req_cnt
, u32
*resp
)
171 int ret
= qcom_scm_clk_enable();
176 ret
= __qcom_scm_hdcp_req(__scm
->dev
, req
, req_cnt
, resp
);
177 qcom_scm_clk_disable();
180 EXPORT_SYMBOL(qcom_scm_hdcp_req
);
183 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
184 * available for the given peripherial
185 * @peripheral: peripheral id
187 * Returns true if PAS is supported for this peripheral, otherwise false.
189 bool qcom_scm_pas_supported(u32 peripheral
)
193 ret
= __qcom_scm_is_call_available(__scm
->dev
, QCOM_SCM_SVC_PIL
,
194 QCOM_SCM_PAS_IS_SUPPORTED_CMD
);
198 return __qcom_scm_pas_supported(__scm
->dev
, peripheral
);
200 EXPORT_SYMBOL(qcom_scm_pas_supported
);
203 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
204 * state machine for a given peripheral, using the
206 * @peripheral: peripheral id
207 * @metadata: pointer to memory containing ELF header, program header table
208 * and optional blob of data used for authenticating the metadata
209 * and the rest of the firmware
210 * @size: size of the metadata
212 * Returns 0 on success.
214 int qcom_scm_pas_init_image(u32 peripheral
, const void *metadata
, size_t size
)
216 dma_addr_t mdata_phys
;
221 * During the scm call memory protection will be enabled for the meta
222 * data blob, so make sure it's physically contiguous, 4K aligned and
223 * non-cachable to avoid XPU violations.
225 mdata_buf
= dma_alloc_coherent(__scm
->dev
, size
, &mdata_phys
,
228 dev_err(__scm
->dev
, "Allocation of metadata buffer failed.\n");
231 memcpy(mdata_buf
, metadata
, size
);
233 ret
= qcom_scm_clk_enable();
237 ret
= __qcom_scm_pas_init_image(__scm
->dev
, peripheral
, mdata_phys
);
239 qcom_scm_clk_disable();
242 dma_free_coherent(__scm
->dev
, size
, mdata_buf
, mdata_phys
);
246 EXPORT_SYMBOL(qcom_scm_pas_init_image
);
249 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
250 * for firmware loading
251 * @peripheral: peripheral id
252 * @addr: start address of memory area to prepare
253 * @size: size of the memory area to prepare
255 * Returns 0 on success.
257 int qcom_scm_pas_mem_setup(u32 peripheral
, phys_addr_t addr
, phys_addr_t size
)
261 ret
= qcom_scm_clk_enable();
265 ret
= __qcom_scm_pas_mem_setup(__scm
->dev
, peripheral
, addr
, size
);
266 qcom_scm_clk_disable();
270 EXPORT_SYMBOL(qcom_scm_pas_mem_setup
);
273 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
274 * and reset the remote processor
275 * @peripheral: peripheral id
277 * Return 0 on success.
279 int qcom_scm_pas_auth_and_reset(u32 peripheral
)
283 ret
= qcom_scm_clk_enable();
287 ret
= __qcom_scm_pas_auth_and_reset(__scm
->dev
, peripheral
);
288 qcom_scm_clk_disable();
292 EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset
);
295 * qcom_scm_pas_shutdown() - Shut down the remote processor
296 * @peripheral: peripheral id
298 * Returns 0 on success.
300 int qcom_scm_pas_shutdown(u32 peripheral
)
304 ret
= qcom_scm_clk_enable();
308 ret
= __qcom_scm_pas_shutdown(__scm
->dev
, peripheral
);
309 qcom_scm_clk_disable();
313 EXPORT_SYMBOL(qcom_scm_pas_shutdown
);
315 static int qcom_scm_pas_reset_assert(struct reset_controller_dev
*rcdev
,
321 return __qcom_scm_pas_mss_reset(__scm
->dev
, 1);
324 static int qcom_scm_pas_reset_deassert(struct reset_controller_dev
*rcdev
,
330 return __qcom_scm_pas_mss_reset(__scm
->dev
, 0);
333 static const struct reset_control_ops qcom_scm_pas_reset_ops
= {
334 .assert = qcom_scm_pas_reset_assert
,
335 .deassert
= qcom_scm_pas_reset_deassert
,
338 int qcom_scm_restore_sec_cfg(u32 device_id
, u32 spare
)
340 return __qcom_scm_restore_sec_cfg(__scm
->dev
, device_id
, spare
);
342 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg
);
344 int qcom_scm_iommu_secure_ptbl_size(u32 spare
, size_t *size
)
346 return __qcom_scm_iommu_secure_ptbl_size(__scm
->dev
, spare
, size
);
348 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size
);
350 int qcom_scm_iommu_secure_ptbl_init(u64 addr
, u32 size
, u32 spare
)
352 return __qcom_scm_iommu_secure_ptbl_init(__scm
->dev
, addr
, size
, spare
);
354 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init
);
356 int qcom_scm_io_readl(phys_addr_t addr
, unsigned int *val
)
358 return __qcom_scm_io_readl(__scm
->dev
, addr
, val
);
360 EXPORT_SYMBOL(qcom_scm_io_readl
);
362 int qcom_scm_io_writel(phys_addr_t addr
, unsigned int val
)
364 return __qcom_scm_io_writel(__scm
->dev
, addr
, val
);
366 EXPORT_SYMBOL(qcom_scm_io_writel
);
368 static void qcom_scm_set_download_mode(bool enable
)
373 avail
= __qcom_scm_is_call_available(__scm
->dev
,
375 QCOM_SCM_SET_DLOAD_MODE
);
377 ret
= __qcom_scm_set_dload_mode(__scm
->dev
, enable
);
378 } else if (__scm
->dload_mode_addr
) {
379 ret
= __qcom_scm_io_writel(__scm
->dev
, __scm
->dload_mode_addr
,
380 enable
? QCOM_SCM_SET_DLOAD_MODE
: 0);
383 "No available mechanism for setting download mode\n");
387 dev_err(__scm
->dev
, "failed to set download mode: %d\n", ret
);
390 static int qcom_scm_find_dload_address(struct device
*dev
, u64
*addr
)
392 struct device_node
*tcsr
;
393 struct device_node
*np
= dev
->of_node
;
398 tcsr
= of_parse_phandle(np
, "qcom,dload-mode", 0);
402 ret
= of_address_to_resource(tcsr
, 0, &res
);
407 ret
= of_property_read_u32_index(np
, "qcom,dload-mode", 1, &offset
);
411 *addr
= res
.start
+ offset
;
417 * qcom_scm_is_available() - Checks if SCM is available
419 bool qcom_scm_is_available(void)
423 EXPORT_SYMBOL(qcom_scm_is_available
);
425 int qcom_scm_set_remote_state(u32 state
, u32 id
)
427 return __qcom_scm_set_remote_state(__scm
->dev
, state
, id
);
429 EXPORT_SYMBOL(qcom_scm_set_remote_state
);
432 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
433 * @mem_addr: mem region whose ownership need to be reassigned
434 * @mem_sz: size of the region.
435 * @srcvm: vmid for current set of owners, each set bit in
436 * flag indicate a unique owner
437 * @newvm: array having new owners and corrsponding permission
439 * @dest_cnt: number of owners in next set.
441 * Return negative errno on failure, 0 on success, with @srcvm updated.
443 int qcom_scm_assign_mem(phys_addr_t mem_addr
, size_t mem_sz
,
445 struct qcom_scm_vmperm
*newvm
, int dest_cnt
)
447 struct qcom_scm_current_perm_info
*destvm
;
448 struct qcom_scm_mem_map_info
*mem_to_map
;
449 phys_addr_t mem_to_map_phys
;
450 phys_addr_t dest_phys
;
451 phys_addr_t ptr_phys
;
452 size_t mem_to_map_sz
;
463 src_sz
= hweight_long(*srcvm
) * sizeof(*src
);
464 mem_to_map_sz
= sizeof(*mem_to_map
);
465 dest_sz
= dest_cnt
* sizeof(*destvm
);
466 ptr_sz
= ALIGN(src_sz
, SZ_64
) + ALIGN(mem_to_map_sz
, SZ_64
) +
467 ALIGN(dest_sz
, SZ_64
);
469 ptr
= dma_alloc_coherent(__scm
->dev
, ptr_sz
, &ptr_phys
, GFP_KERNEL
);
473 /* Fill source vmid detail */
475 len
= hweight_long(*srcvm
);
476 for (i
= 0; i
< len
; i
++) {
477 src
[i
] = cpu_to_le32(ffs(*srcvm
) - 1);
478 *srcvm
^= 1 << (ffs(*srcvm
) - 1);
481 /* Fill details of mem buff to map */
482 mem_to_map
= ptr
+ ALIGN(src_sz
, SZ_64
);
483 mem_to_map_phys
= ptr_phys
+ ALIGN(src_sz
, SZ_64
);
484 mem_to_map
[0].mem_addr
= cpu_to_le64(mem_addr
);
485 mem_to_map
[0].mem_size
= cpu_to_le64(mem_sz
);
488 /* Fill details of next vmid detail */
489 destvm
= ptr
+ ALIGN(mem_to_map_sz
, SZ_64
) + ALIGN(src_sz
, SZ_64
);
490 dest_phys
= ptr_phys
+ ALIGN(mem_to_map_sz
, SZ_64
) + ALIGN(src_sz
, SZ_64
);
491 for (i
= 0; i
< dest_cnt
; i
++) {
492 destvm
[i
].vmid
= cpu_to_le32(newvm
[i
].vmid
);
493 destvm
[i
].perm
= cpu_to_le32(newvm
[i
].perm
);
495 destvm
[i
].ctx_size
= 0;
496 next_vm
|= BIT(newvm
[i
].vmid
);
499 ret
= __qcom_scm_assign_mem(__scm
->dev
, mem_to_map_phys
, mem_to_map_sz
,
500 ptr_phys
, src_sz
, dest_phys
, dest_sz
);
501 dma_free_coherent(__scm
->dev
, ALIGN(ptr_sz
, SZ_64
), ptr
, ptr_phys
);
504 "Assign memory protection call failed %d.\n", ret
);
511 EXPORT_SYMBOL(qcom_scm_assign_mem
);
513 static int qcom_scm_probe(struct platform_device
*pdev
)
515 struct qcom_scm
*scm
;
519 scm
= devm_kzalloc(&pdev
->dev
, sizeof(*scm
), GFP_KERNEL
);
523 ret
= qcom_scm_find_dload_address(&pdev
->dev
, &scm
->dload_mode_addr
);
527 clks
= (unsigned long)of_device_get_match_data(&pdev
->dev
);
529 scm
->core_clk
= devm_clk_get(&pdev
->dev
, "core");
530 if (IS_ERR(scm
->core_clk
)) {
531 if (PTR_ERR(scm
->core_clk
) == -EPROBE_DEFER
)
532 return PTR_ERR(scm
->core_clk
);
534 if (clks
& SCM_HAS_CORE_CLK
) {
535 dev_err(&pdev
->dev
, "failed to acquire core clk\n");
536 return PTR_ERR(scm
->core_clk
);
539 scm
->core_clk
= NULL
;
542 scm
->iface_clk
= devm_clk_get(&pdev
->dev
, "iface");
543 if (IS_ERR(scm
->iface_clk
)) {
544 if (PTR_ERR(scm
->iface_clk
) == -EPROBE_DEFER
)
545 return PTR_ERR(scm
->iface_clk
);
547 if (clks
& SCM_HAS_IFACE_CLK
) {
548 dev_err(&pdev
->dev
, "failed to acquire iface clk\n");
549 return PTR_ERR(scm
->iface_clk
);
552 scm
->iface_clk
= NULL
;
555 scm
->bus_clk
= devm_clk_get(&pdev
->dev
, "bus");
556 if (IS_ERR(scm
->bus_clk
)) {
557 if (PTR_ERR(scm
->bus_clk
) == -EPROBE_DEFER
)
558 return PTR_ERR(scm
->bus_clk
);
560 if (clks
& SCM_HAS_BUS_CLK
) {
561 dev_err(&pdev
->dev
, "failed to acquire bus clk\n");
562 return PTR_ERR(scm
->bus_clk
);
568 scm
->reset
.ops
= &qcom_scm_pas_reset_ops
;
569 scm
->reset
.nr_resets
= 1;
570 scm
->reset
.of_node
= pdev
->dev
.of_node
;
571 ret
= devm_reset_controller_register(&pdev
->dev
, &scm
->reset
);
575 /* vote for max clk rate for highest performance */
576 ret
= clk_set_rate(scm
->core_clk
, INT_MAX
);
581 __scm
->dev
= &pdev
->dev
;
586 * If requested enable "download mode", from this point on warmboot
587 * will cause the the boot stages to enter download mode, unless
588 * disabled below by a clean shutdown/reboot.
591 qcom_scm_set_download_mode(true);
596 static void qcom_scm_shutdown(struct platform_device
*pdev
)
598 /* Clean shutdown, disable download mode to allow normal restart */
600 qcom_scm_set_download_mode(false);
603 static const struct of_device_id qcom_scm_dt_match
[] = {
604 { .compatible
= "qcom,scm-apq8064",
605 /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
607 { .compatible
= "qcom,scm-apq8084", .data
= (void *)(SCM_HAS_CORE_CLK
|
611 { .compatible
= "qcom,scm-ipq4019" },
612 { .compatible
= "qcom,scm-msm8660", .data
= (void *) SCM_HAS_CORE_CLK
},
613 { .compatible
= "qcom,scm-msm8960", .data
= (void *) SCM_HAS_CORE_CLK
},
614 { .compatible
= "qcom,scm-msm8916", .data
= (void *)(SCM_HAS_CORE_CLK
|
618 { .compatible
= "qcom,scm-msm8974", .data
= (void *)(SCM_HAS_CORE_CLK
|
622 { .compatible
= "qcom,scm-msm8996" },
623 { .compatible
= "qcom,scm" },
627 static struct platform_driver qcom_scm_driver
= {
630 .of_match_table
= qcom_scm_dt_match
,
632 .probe
= qcom_scm_probe
,
633 .shutdown
= qcom_scm_shutdown
,
636 static int __init
qcom_scm_init(void)
638 return platform_driver_register(&qcom_scm_driver
);
640 subsys_initcall(qcom_scm_init
);