// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Linaro Limited. All rights reserved.
 *
 * Author: Leo Yan <leo.yan@linaro.org>
 */
#include <linux/acpi.h>
#include <linux/amba/bus.h>
#include <linux/coresight.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include "coresight-priv.h"

#define EDPCSR                          0x0A0
#define EDCIDSR                         0x0A4
#define EDVIDSR                         0x0A8
#define EDPCSR_HI                       0x0AC
#define EDOSLAR                         0x300
#define EDPRCR                          0x310
#define EDPRSR                          0x314
#define EDDEVID1                        0xFC4
#define EDDEVID                         0xFC8

#define EDPCSR_PROHIBITED               0xFFFFFFFF

/* bits definition for EDPCSR */
#define EDPCSR_THUMB                    BIT(0)
#define EDPCSR_ARM_INST_MASK            GENMASK(31, 2)
#define EDPCSR_THUMB_INST_MASK          GENMASK(31, 1)

/* bits definition for EDPRCR */
#define EDPRCR_COREPURQ                 BIT(3)
#define EDPRCR_CORENPDRQ                BIT(0)

/* bits definition for EDPRSR */
#define EDPRSR_DLK                      BIT(6)
#define EDPRSR_PU                       BIT(0)

/* bits definition for EDVIDSR */
#define EDVIDSR_NS                      BIT(31)
#define EDVIDSR_E2                      BIT(30)
#define EDVIDSR_E3                      BIT(29)
#define EDVIDSR_HV                      BIT(28)
#define EDVIDSR_VMID                    GENMASK(7, 0)

/*
 * bits definition for EDDEVID1:PSCROffset
 *
 * NOTE: armv8 and armv7 have different definitions for this register,
 * so consolidate the bits definition as below:
 *
 * 0b0000 - Sample offset applies based on the instruction state; we
 *          rely on EDDEVID to check if EDPCSR is implemented or not
 * 0b0001 - No offset applies.
 * 0b0010 - No offset applies, but do not use in AArch32 mode
 *
 */
#define EDDEVID1_PCSR_OFFSET_MASK       GENMASK(3, 0)
#define EDDEVID1_PCSR_OFFSET_INS_SET    (0x0)
#define EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32     (0x2)

/* bits definition for EDDEVID */
#define EDDEVID_PCSAMPLE_MODE           GENMASK(3, 0)
#define EDDEVID_IMPL_EDPCSR             (0x1)
#define EDDEVID_IMPL_EDPCSR_EDCIDSR     (0x2)
#define EDDEVID_IMPL_FULL               (0x3)

#define DEBUG_WAIT_SLEEP                1000
#define DEBUG_WAIT_TIMEOUT              32000

struct debug_drvdata {
        struct clk              *pclk;
        void __iomem            *base;
        struct device           *dev;
        int                     cpu;

        bool                    edpcsr_present;
        bool                    edcidsr_present;
        bool                    edvidsr_present;
        bool                    pc_has_offset;

        u32                     edpcsr;
        u32                     edpcsr_hi;
        u32                     edprsr;
        u32                     edvidsr;
        u32                     edcidsr;
};

static DEFINE_MUTEX(debug_lock);
static DEFINE_PER_CPU(struct debug_drvdata *, debug_drvdata);
static int debug_count;
static struct dentry *debug_debugfs_dir;

static bool debug_enable = IS_ENABLED(CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON);
module_param_named(enable, debug_enable, bool, 0600);
MODULE_PARM_DESC(enable, "Control to enable coresight CPU debug functionality");
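
/*
 * Usage sketch (illustrative; the debugfs path follows from
 * debug_func_init() below, the exact module name is an assumption):
 *
 *   # enable when loading the module
 *   modprobe coresight-cpu-debug enable=1
 *
 *   # or toggle at runtime through the debugfs knob
 *   echo 1 > /sys/kernel/debug/coresight_cpu_debug/enable
 */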

static void debug_os_unlock(struct debug_drvdata *drvdata)
{
        /* Unlock the debug registers */
        writel_relaxed(0x0, drvdata->base + EDOSLAR);

        /* Make sure the registers are unlocked before accessing them */
        wmb();
}

/*
 * According to ARM DDI 0487A.k, the access permission must be checked
 * before touching the external debug registers; if either condition
 * below is met, accessing the debug registers risks locking up the
 * system:
 *
 * - the CPU power domain is powered off;
 * - the OS Double Lock is locked.
 *
 * Both conditions can be detected by reading EDPRSR.
 */
static bool debug_access_permitted(struct debug_drvdata *drvdata)
{
        /* CPU is powered off */
        if (!(drvdata->edprsr & EDPRSR_PU))
                return false;

        /* The OS Double Lock is locked */
        if (drvdata->edprsr & EDPRSR_DLK)
                return false;

        return true;
}
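
/*
 * Note (descriptive, derived from the callers below): this helper checks
 * the cached drvdata->edprsr, so the caller is expected to have refreshed
 * it first, e.g. via debug_force_cpu_powered_up() in debug_read_regs().
 */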

static void debug_force_cpu_powered_up(struct debug_drvdata *drvdata)
{
        u32 edprcr;

try_again:

        /*
         * Send a request to the power management controller and assert
         * the DBGPWRUPREQ signal; if the power management controller has
         * a sane implementation, it should enable the CPU power domain
         * in case the CPU is in a low power state.
         */
        edprcr = readl_relaxed(drvdata->base + EDPRCR);
        edprcr |= EDPRCR_COREPURQ;
        writel_relaxed(edprcr, drvdata->base + EDPRCR);

        /* Wait for CPU to be powered up (timeout~=32ms) */
        if (readx_poll_timeout_atomic(readl_relaxed, drvdata->base + EDPRSR,
                        drvdata->edprsr, (drvdata->edprsr & EDPRSR_PU),
                        DEBUG_WAIT_SLEEP, DEBUG_WAIT_TIMEOUT)) {
                /*
                 * Unfortunately the CPU cannot be powered up, so bail out;
                 * later accesses to the other debug registers will not be
                 * permitted. In this case CPU low power states should be
                 * disabled to keep the CPU power domain on!
                 */
                dev_err(drvdata->dev, "%s: power up request for CPU%d failed\n",
                        __func__, drvdata->cpu);
                return;
        }

        /*
         * At this point the CPU is powered up, so set the no powerdown
         * request bit so we don't lose power and emulate power down.
         */
        edprcr = readl_relaxed(drvdata->base + EDPRCR);
        edprcr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;
        writel_relaxed(edprcr, drvdata->base + EDPRCR);

        drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);

        /* The core power domain got switched off on us, try again */
        if (unlikely(!(drvdata->edprsr & EDPRSR_PU)))
                goto try_again;
}

static void debug_read_regs(struct debug_drvdata *drvdata)
{
        u32 save_edprcr;

        CS_UNLOCK(drvdata->base);

        /* Unlock the OS lock */
        debug_os_unlock(drvdata);

        /* Save EDPRCR register */
        save_edprcr = readl_relaxed(drvdata->base + EDPRCR);

        /*
         * Ensure the CPU power domain is enabled so that the
         * registers are accessible.
         */
        debug_force_cpu_powered_up(drvdata);

        if (!debug_access_permitted(drvdata))
                goto out;

        drvdata->edpcsr = readl_relaxed(drvdata->base + EDPCSR);

        /*
         * As described in ARM DDI 0487A.k, if the processing
         * element (PE) is in debug state, or sample-based
         * profiling is prohibited, EDPCSR reads as 0xFFFFFFFF;
         * the EDCIDSR, EDVIDSR and EDPCSR_HI registers also become
         * UNKNOWN. So directly bail out for this case.
         */
        if (drvdata->edpcsr == EDPCSR_PROHIBITED)
                goto out;

        /*
         * A read of the EDPCSR normally has the side-effect of
         * indirectly writing to EDCIDSR, EDVIDSR and EDPCSR_HI;
         * at this point it's safe to read values from them.
         */
        if (IS_ENABLED(CONFIG_64BIT))
                drvdata->edpcsr_hi = readl_relaxed(drvdata->base + EDPCSR_HI);

        if (drvdata->edcidsr_present)
                drvdata->edcidsr = readl_relaxed(drvdata->base + EDCIDSR);

        if (drvdata->edvidsr_present)
                drvdata->edvidsr = readl_relaxed(drvdata->base + EDVIDSR);

out:
        /* Restore EDPRCR register */
        writel_relaxed(save_edprcr, drvdata->base + EDPRCR);

        CS_LOCK(drvdata->base);
}

#ifdef CONFIG_64BIT
static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
{
        return (unsigned long)drvdata->edpcsr_hi << 32 |
               (unsigned long)drvdata->edpcsr;
}
#else
static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
{
        unsigned long arm_inst_offset = 0, thumb_inst_offset = 0;
        unsigned long pc;

        pc = (unsigned long)drvdata->edpcsr;

        if (drvdata->pc_has_offset) {
                arm_inst_offset = 8;
                thumb_inst_offset = 4;
        }

        /* Handle a Thumb instruction */
        if (pc & EDPCSR_THUMB) {
                pc = (pc & EDPCSR_THUMB_INST_MASK) - thumb_inst_offset;
                return pc;
        }

        /*
         * Handle the ARM instruction offset; if the sampled value is not
         * 4-byte aligned the offset is likely IMPLEMENTATION DEFINED, so
         * keep the original value and print a notice.
         */
        if (pc & BIT(1))
                dev_emerg(drvdata->dev,
                          "Instruction offset is implementation defined\n");
        else
                pc = (pc & EDPCSR_ARM_INST_MASK) - arm_inst_offset;

        return pc;
}
#endif
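
/*
 * Worked example for the AArch32 path above (illustrative values only):
 * with pc_has_offset set, a sampled EDPCSR of 0x80081009 has bit 0 set
 * (Thumb), so the result is (0x80081009 & ~0x1) - 4 = 0x80081004; an ARM
 * state sample of 0x80081008 becomes (0x80081008 & ~0x3) - 8 = 0x80081000.
 */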

static void debug_dump_regs(struct debug_drvdata *drvdata)
{
        struct device *dev = drvdata->dev;
        unsigned long pc;

        dev_emerg(dev, " EDPRSR: %08x (Power:%s DLK:%s)\n",
                  drvdata->edprsr,
                  drvdata->edprsr & EDPRSR_PU ? "On" : "Off",
                  drvdata->edprsr & EDPRSR_DLK ? "Lock" : "Unlock");

        if (!debug_access_permitted(drvdata)) {
                dev_emerg(dev, "No permission to access debug registers!\n");
                return;
        }

        if (drvdata->edpcsr == EDPCSR_PROHIBITED) {
                dev_emerg(dev, "CPU is in Debug state or profiling is prohibited!\n");
                return;
        }

        pc = debug_adjust_pc(drvdata);
        dev_emerg(dev, " EDPCSR: %pS\n", (void *)pc);

        if (drvdata->edcidsr_present)
                dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr);

        if (drvdata->edvidsr_present)
                dev_emerg(dev, " EDVIDSR: %08x (State:%s Mode:%s Width:%dbits VMID:%x)\n",
                          drvdata->edvidsr,
                          drvdata->edvidsr & EDVIDSR_NS ?
                          "Non-secure" : "Secure",
                          drvdata->edvidsr & EDVIDSR_E3 ? "EL3" :
                                (drvdata->edvidsr & EDVIDSR_E2 ?
                                 "EL2" : "EL1/0"),
                          drvdata->edvidsr & EDVIDSR_HV ? 64 : 32,
                          drvdata->edvidsr & (u32)EDVIDSR_VMID);
}

static void debug_init_arch_data(void *info)
{
        struct debug_drvdata *drvdata = info;
        u32 mode, pcsr_offset;
        u32 eddevid, eddevid1;

        CS_UNLOCK(drvdata->base);

        /* Read device info */
        eddevid = readl_relaxed(drvdata->base + EDDEVID);
        eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);

        CS_LOCK(drvdata->base);

        /* Parse implementation feature */
        mode = eddevid & EDDEVID_PCSAMPLE_MODE;
        pcsr_offset = eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;

        drvdata->edpcsr_present  = false;
        drvdata->edcidsr_present = false;
        drvdata->edvidsr_present = false;
        drvdata->pc_has_offset   = false;

        switch (mode) {
        case EDDEVID_IMPL_FULL:
                drvdata->edvidsr_present = true;
                fallthrough;
        case EDDEVID_IMPL_EDPCSR_EDCIDSR:
                drvdata->edcidsr_present = true;
                fallthrough;
        case EDDEVID_IMPL_EDPCSR:
                /*
                 * Per ARM DDI 0487A.k, EDDEVID1.PCSROffset defines whether
                 * the PC sample value carries an offset; if it reads back
                 * as 0x2, the debug module does not sample the instruction
                 * set state when an armv8 CPU is in AArch32 state.
                 */
                drvdata->edpcsr_present =
                        ((IS_ENABLED(CONFIG_64BIT) && pcsr_offset != 0) ||
                         (pcsr_offset != EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32));

                drvdata->pc_has_offset =
                        (pcsr_offset == EDDEVID1_PCSR_OFFSET_INS_SET);
                break;
        default:
                break;
        }
}
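
/*
 * Summary of the EDDEVID.PCSample decoding above: 0x1 implements EDPCSR
 * only, 0x2 additionally implements EDCIDSR, 0x3 also implements EDVIDSR;
 * any other value leaves sample-based profiling unsupported.
 */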

/*
 * Dump out information on panic.
 */
static int debug_notifier_call(struct notifier_block *self,
                               unsigned long v, void *p)
{
        int cpu;
        struct debug_drvdata *drvdata;

        /* Bail out if we can't acquire the mutex or the functionality is off */
        if (!mutex_trylock(&debug_lock))
                return NOTIFY_DONE;

        if (!debug_enable)
                goto skip_dump;

        pr_emerg("ARM external debug module:\n");

        for_each_possible_cpu(cpu) {
                drvdata = per_cpu(debug_drvdata, cpu);
                if (!drvdata)
                        continue;

                dev_emerg(drvdata->dev, "CPU[%d]:\n", drvdata->cpu);

                debug_read_regs(drvdata);
                debug_dump_regs(drvdata);
        }

skip_dump:
        mutex_unlock(&debug_lock);
        return NOTIFY_DONE;
}

static struct notifier_block debug_notifier = {
        .notifier_call = debug_notifier_call,
};
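
/*
 * Example of the panic dump produced by the notifier above (illustrative
 * only; the device name and sampled values are assumptions):
 *
 *   ARM external debug module:
 *   coresight-cpu-debug 850000.debug: CPU[0]:
 *   coresight-cpu-debug 850000.debug:  EDPRSR: 00000001 (Power:On DLK:Unlock)
 *   coresight-cpu-debug 850000.debug:  EDPCSR: handle_IPI+0x174/0x1d8
 */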

static int debug_enable_func(void)
{
        struct debug_drvdata *drvdata;
        int cpu, ret = 0;
        cpumask_t mask;

        /*
         * Use a cpumask to track which debug power domains have
         * been powered on, and use it to handle the failure case.
         */
        cpumask_clear(&mask);

        for_each_possible_cpu(cpu) {
                drvdata = per_cpu(debug_drvdata, cpu);
                if (!drvdata)
                        continue;

                ret = pm_runtime_get_sync(drvdata->dev);
                if (ret < 0)
                        goto err;
                else
                        cpumask_set_cpu(cpu, &mask);
        }

        return 0;

err:
        /*
         * If pm_runtime_get_sync() has failed, roll back all the other
         * CPUs that were enabled before the failure.
         */
        for_each_cpu(cpu, &mask) {
                drvdata = per_cpu(debug_drvdata, cpu);
                pm_runtime_put_noidle(drvdata->dev);
        }

        return ret;
}

static int debug_disable_func(void)
{
        struct debug_drvdata *drvdata;
        int cpu, ret, err = 0;

        /*
         * Disable the debug power domains; record any error but keep
         * iterating over the remaining CPUs when one is encountered.
         */
        for_each_possible_cpu(cpu) {
                drvdata = per_cpu(debug_drvdata, cpu);
                if (!drvdata)
                        continue;

                ret = pm_runtime_put(drvdata->dev);
                if (ret < 0)
                        err = ret;
        }

        return err;
}

static ssize_t debug_func_knob_write(struct file *f,
                const char __user *buf, size_t count, loff_t *ppos)
{
        u8 val;
        int ret;

        ret = kstrtou8_from_user(buf, count, 2, &val);
        if (ret)
                return ret;

        mutex_lock(&debug_lock);

        if (val == debug_enable)
                goto out;

        if (val)
                ret = debug_enable_func();
        else
                ret = debug_disable_func();

        if (ret) {
                pr_err("%s: unable to %s debug function: %d\n",
                       __func__, val ? "enable" : "disable", ret);
                goto err;
        }

        debug_enable = val;
out:
        ret = count;
err:
        mutex_unlock(&debug_lock);
        return ret;
}

static ssize_t debug_func_knob_read(struct file *f,
                char __user *ubuf, size_t count, loff_t *ppos)
{
        ssize_t ret;
        char buf[3];

        mutex_lock(&debug_lock);
        snprintf(buf, sizeof(buf), "%d\n", debug_enable);
        mutex_unlock(&debug_lock);

        ret = simple_read_from_buffer(ubuf, count, ppos, buf, sizeof(buf));
        return ret;
}

static const struct file_operations debug_func_knob_fops = {
        .open  = simple_open,
        .read  = debug_func_knob_read,
        .write = debug_func_knob_write,
};

static int debug_func_init(void)
{
        int ret;

        /* Create debugfs node */
        debug_debugfs_dir = debugfs_create_dir("coresight_cpu_debug", NULL);
        debugfs_create_file("enable", 0644, debug_debugfs_dir, NULL,
                            &debug_func_knob_fops);

        /* Register function to be called for panic */
        ret = atomic_notifier_chain_register(&panic_notifier_list,
                                             &debug_notifier);
        if (ret) {
                pr_err("%s: unable to register notifier: %d\n",
                       __func__, ret);
                goto err;
        }

        return 0;

err:
        debugfs_remove_recursive(debug_debugfs_dir);
        return ret;
}

static void debug_func_exit(void)
{
        atomic_notifier_chain_unregister(&panic_notifier_list,
                                         &debug_notifier);
        debugfs_remove_recursive(debug_debugfs_dir);
}

static int __debug_probe(struct device *dev, struct resource *res)
{
        struct debug_drvdata *drvdata = dev_get_drvdata(dev);
        void __iomem *base;
        int ret;

        drvdata->cpu = coresight_get_cpu(dev);
        if (drvdata->cpu < 0)
                return drvdata->cpu;

        if (per_cpu(debug_drvdata, drvdata->cpu)) {
                dev_err(dev, "CPU%d drvdata has already been initialized\n",
                        drvdata->cpu);
                return -EBUSY;
        }

        drvdata->dev = dev;
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        drvdata->base = base;

        cpus_read_lock();
        per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
        ret = smp_call_function_single(drvdata->cpu, debug_init_arch_data,
                                       drvdata, 1);
        cpus_read_unlock();

        if (ret) {
                dev_err(dev, "CPU%d debug arch init failed\n", drvdata->cpu);
                goto err;
        }

        if (!drvdata->edpcsr_present) {
                dev_err(dev, "CPU%d sample-based profiling isn't implemented\n",
                        drvdata->cpu);
                ret = -ENXIO;
                goto err;
        }

        if (!debug_count++) {
                ret = debug_func_init();
                if (ret)
                        goto err_func_init;
        }

        mutex_lock(&debug_lock);
        /* Turn off debug power domain if debugging is disabled */
        if (!debug_enable)
                pm_runtime_put(dev);
        mutex_unlock(&debug_lock);

        dev_info(dev, "Coresight debug-CPU%d initialized\n", drvdata->cpu);
        return 0;

err_func_init:
        debug_count--;
err:
        per_cpu(debug_drvdata, drvdata->cpu) = NULL;
        return ret;
}

static int debug_probe(struct amba_device *adev, const struct amba_id *id)
{
        struct debug_drvdata *drvdata;

        drvdata = devm_kzalloc(&adev->dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;

        amba_set_drvdata(adev, drvdata);
        return __debug_probe(&adev->dev, &adev->res);
}

static void __debug_remove(struct device *dev)
{
        struct debug_drvdata *drvdata = dev_get_drvdata(dev);

        per_cpu(debug_drvdata, drvdata->cpu) = NULL;

        mutex_lock(&debug_lock);
        /* Turn off the debug power domain before the module is removed */
        if (debug_enable)
                pm_runtime_put(dev);
        mutex_unlock(&debug_lock);

        if (!--debug_count)
                debug_func_exit();
}

static void debug_remove(struct amba_device *adev)
{
        __debug_remove(&adev->dev);
}

static const struct amba_cs_uci_id uci_id_debug[] = {
        {
                /* CPU Debug UCI data */
                .devarch        = 0x47706a15,
                .devarch_mask   = 0xfff0ffff,
                .devtype        = 0x00000015,
        }
};

static const struct amba_id debug_ids[] = {
        CS_AMBA_ID(0x000bbd03),                         /* Cortex-A53 */
        CS_AMBA_ID(0x000bbd07),                         /* Cortex-A57 */
        CS_AMBA_ID(0x000bbd08),                         /* Cortex-A72 */
        CS_AMBA_ID(0x000bbd09),                         /* Cortex-A73 */
        CS_AMBA_UCI_ID(0x000f0205, uci_id_debug),       /* Qualcomm Kryo */
        CS_AMBA_UCI_ID(0x000f0211, uci_id_debug),       /* Qualcomm Kryo */
        {},
};

MODULE_DEVICE_TABLE(amba, debug_ids);

static struct amba_driver debug_driver = {
        .drv = {
                .name                   = "coresight-cpu-debug",
                .suppress_bind_attrs    = true,
        },
        .probe          = debug_probe,
        .remove         = debug_remove,
        .id_table       = debug_ids,
};

static int debug_platform_probe(struct platform_device *pdev)
{
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct debug_drvdata *drvdata;
        int ret = 0;

        drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;

        drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
        if (IS_ERR(drvdata->pclk))
                return -ENODEV;

        dev_set_drvdata(&pdev->dev, drvdata);
        pm_runtime_get_noresume(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        ret = __debug_probe(&pdev->dev, res);
        if (ret) {
                pm_runtime_put_noidle(&pdev->dev);
                pm_runtime_disable(&pdev->dev);
                if (!IS_ERR_OR_NULL(drvdata->pclk))
                        clk_put(drvdata->pclk);
        }
        return ret;
}
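
/*
 * Note (descriptive): the APB clock acquired in debug_platform_probe() is
 * released either in the error path above or in debug_platform_remove(),
 * and is gated/ungated by the runtime PM callbacks further below.
 */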

static void debug_platform_remove(struct platform_device *pdev)
{
        struct debug_drvdata *drvdata = dev_get_drvdata(&pdev->dev);

        if (WARN_ON(!drvdata))
                return;

        __debug_remove(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        if (!IS_ERR_OR_NULL(drvdata->pclk))
                clk_put(drvdata->pclk);
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id debug_platform_ids[] = {
        {"ARMHC503", 0, 0, 0}, /* ARM CoreSight Debug */
        {},
};
MODULE_DEVICE_TABLE(acpi, debug_platform_ids);
#endif

#ifdef CONFIG_PM
static int debug_runtime_suspend(struct device *dev)
{
        struct debug_drvdata *drvdata = dev_get_drvdata(dev);

        if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
                clk_disable_unprepare(drvdata->pclk);
        return 0;
}

static int debug_runtime_resume(struct device *dev)
{
        struct debug_drvdata *drvdata = dev_get_drvdata(dev);

        if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
                clk_prepare_enable(drvdata->pclk);
        return 0;
}
#endif

static const struct dev_pm_ops debug_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(debug_runtime_suspend, debug_runtime_resume, NULL)
};

static struct platform_driver debug_platform_driver = {
        .probe          = debug_platform_probe,
        .remove         = debug_platform_remove,
        .driver = {
                .name                   = "coresight-debug-platform",
                .acpi_match_table       = ACPI_PTR(debug_platform_ids),
                .suppress_bind_attrs    = true,
                .pm                     = &debug_dev_pm_ops,
        },
};

static int __init debug_init(void)
{
        return coresight_init_driver("debug", &debug_driver, &debug_platform_driver);
}

static void __exit debug_exit(void)
{
        coresight_remove_driver(&debug_driver, &debug_platform_driver);
}
module_init(debug_init);
module_exit(debug_exit);

MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
MODULE_DESCRIPTION("ARM Coresight CPU Debug Driver");
MODULE_LICENSE("GPL");