/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/compat.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/smp.h>

#include <asm/compat.h>
#include <asm/current.h>
#include <asm/debug-monitors.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
#include <asm/uaccess.h>
/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Currently stepping a per-CPU kernel breakpoint. */
static DEFINE_PER_CPU(int, stepping_kernel_bp);
/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;
int hw_breakpoint_slots(int type)
{
        /*
         * We can be called early, so don't rely on
         * our static variables being initialised.
         */
        switch (type) {
        case TYPE_INST:
                return get_num_brps();
        case TYPE_DATA:
                return get_num_wrps();
        default:
                pr_warning("unknown slot type: %d\n", type);
                return 0;
        }
}
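/*
 * For example (illustrative), a CPU whose debug logic advertises six
 * breakpoint and four watchpoint register pairs answers
 * hw_breakpoint_slots(TYPE_INST) = 6 and hw_breakpoint_slots(TYPE_DATA) = 4;
 * the perf core sizes its breakpoint constraint tables from these values.
 */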
#define READ_WB_REG_CASE(OFF, N, REG, VAL)      \
        case (OFF + N):                         \
                AARCH64_DBG_READ(N, REG, VAL);  \
                break

#define WRITE_WB_REG_CASE(OFF, N, REG, VAL)     \
        case (OFF + N):                         \
                AARCH64_DBG_WRITE(N, REG, VAL); \
                break
#define GEN_READ_WB_REG_CASES(OFF, REG, VAL)    \
        READ_WB_REG_CASE(OFF,  0, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  1, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  2, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  3, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  4, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  5, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  6, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  7, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  8, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  9, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 10, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 11, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 12, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 13, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 14, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 15, REG, VAL)
#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL)   \
        WRITE_WB_REG_CASE(OFF,  0, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  1, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  2, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  3, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  4, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  5, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  6, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  7, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  8, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  9, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 10, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 11, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 12, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 13, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 14, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 15, REG, VAL)
static u64 read_wb_reg(int reg, int n)
{
        u64 val = 0;

        switch (reg + n) {
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
        default:
                pr_warning("attempt to read from unknown breakpoint register %d\n", n);
        }

        return val;
}
NOKPROBE_SYMBOL(read_wb_reg);
static void write_wb_reg(int reg, int n, u64 val)
{
        switch (reg + n) {
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
        default:
                pr_warning("attempt to write to unknown breakpoint register %d\n", n);
        }
        isb();
}
NOKPROBE_SYMBOL(write_wb_reg);
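/*
 * Illustrative expansion of the accessors above: read_wb_reg(
 * AARCH64_DBG_REG_BVR, 2) selects the READ_WB_REG_CASE(AARCH64_DBG_REG_BVR,
 * 2, ...) arm of the switch, i.e. an MRS from DBGBVR2_EL1. The case tables
 * exist because the debug register number is part of the system register
 * encoding and must be a compile-time constant; it cannot be computed at
 * run time.
 */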
/*
 * Convert a breakpoint privilege level to the corresponding exception
 * level.
 */
static enum dbg_active_el debug_exception_level(int privilege)
{
        switch (privilege) {
        case AARCH64_BREAKPOINT_EL0:
                return DBG_ACTIVE_EL0;
        case AARCH64_BREAKPOINT_EL1:
                return DBG_ACTIVE_EL1;
        default:
                pr_warning("invalid breakpoint privilege level %d\n", privilege);
                return -EINVAL;
        }
}
NOKPROBE_SYMBOL(debug_exception_level);
enum hw_breakpoint_ops {
        HW_BREAKPOINT_INSTALL,
        HW_BREAKPOINT_UNINSTALL,
        HW_BREAKPOINT_RESTORE
};
static int is_compat_bp(struct perf_event *bp)
{
        struct task_struct *tsk = bp->hw.target;

        /*
         * tsk can be NULL for per-cpu (non-ptrace) breakpoints.
         * In this case, use the native interface, since we don't have
         * the notion of a "compat CPU" and could end up relying on
         * deprecated behaviour if we use unaligned watchpoints in
         * AArch64 state.
         */
        return tsk && is_compat_thread(task_thread_info(tsk));
}
/**
 * hw_breakpoint_slot_setup - Find and setup a perf slot according to
 *                            operations
 *
 * @slots: pointer to array of slots
 * @max_slots: max number of slots
 * @bp: perf_event to setup
 * @ops: operation to be carried out on the slot
 *
 * Return:
 *      slot index on success
 *      -ENOSPC if no slot is available/matches
 *      -EINVAL on wrong operations parameter
 */
static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
                                    struct perf_event *bp,
                                    enum hw_breakpoint_ops ops)
{
        int i;
        struct perf_event **slot;

        for (i = 0; i < max_slots; ++i) {
                slot = &slots[i];
                switch (ops) {
                case HW_BREAKPOINT_INSTALL:
                        if (!*slot) {
                                *slot = bp;
                                return i;
                        }
                        break;
                case HW_BREAKPOINT_UNINSTALL:
                        if (*slot == bp) {
                                *slot = NULL;
                                return i;
                        }
                        break;
                case HW_BREAKPOINT_RESTORE:
                        if (*slot == bp)
                                return i;
                        break;
                default:
                        pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
                        return -EINVAL;
                }
        }

        return -ENOSPC;
}
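/*
 * For example, an INSTALL request on a CPU with six BRPs scans
 * bp_on_reg[0..5] for the local CPU and claims the first empty slot;
 * the returned index doubles as the debug register number that the
 * caller then programs.
 */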
static int hw_breakpoint_control(struct perf_event *bp,
                                 enum hw_breakpoint_ops ops)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        struct perf_event **slots;
        struct debug_info *debug_info = &current->thread.debug;
        int i, max_slots, ctrl_reg, val_reg, reg_enable;
        enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
        u32 ctrl;

        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                /* Breakpoint */
                ctrl_reg = AARCH64_DBG_REG_BCR;
                val_reg = AARCH64_DBG_REG_BVR;
                slots = this_cpu_ptr(bp_on_reg);
                max_slots = core_num_brps;
                reg_enable = !debug_info->bps_disabled;
        } else {
                /* Watchpoint */
                ctrl_reg = AARCH64_DBG_REG_WCR;
                val_reg = AARCH64_DBG_REG_WVR;
                slots = this_cpu_ptr(wp_on_reg);
                max_slots = core_num_wrps;
                reg_enable = !debug_info->wps_disabled;
        }

        i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);

        if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
                return i;

        switch (ops) {
        case HW_BREAKPOINT_INSTALL:
                /*
                 * Ensure debug monitors are enabled at the correct exception
                 * level.
                 */
                enable_debug_monitors(dbg_el);
                /* Fall through */
        case HW_BREAKPOINT_RESTORE:
                /* Setup the address register. */
                write_wb_reg(val_reg, i, info->address);

                /* Setup the control register. */
                ctrl = encode_ctrl_reg(info->ctrl);
                write_wb_reg(ctrl_reg, i,
                             reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
                break;
        case HW_BREAKPOINT_UNINSTALL:
                /* Reset the control register. */
                write_wb_reg(ctrl_reg, i, 0);

                /*
                 * Release the debug monitors for the correct exception
                 * level.
                 */
                disable_debug_monitors(dbg_el);
                break;
        }

        return 0;
}
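/*
 * Rough sketch of the control word written above, assuming the packing
 * performed by encode_ctrl_reg() in <asm/hw_breakpoint.h>
 * (len << 5 | type << 3 | privilege << 1 | enabled): a 4-byte, enabled,
 * EL0 store watchpoint would encode as
 * (0xf << 5) | (ARM_BREAKPOINT_STORE << 3) | (AARCH64_BREAKPOINT_EL0 << 1) | 1
 * = 0x1f5, with the install path then forcing bit 0 according to reg_enable.
 */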
/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
        return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
        hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}
static int get_hbp_len(u8 hbp_len)
{
        unsigned int len_in_bytes = 0;

        switch (hbp_len) {
        case ARM_BREAKPOINT_LEN_1:
                len_in_bytes = 1;
                break;
        case ARM_BREAKPOINT_LEN_2:
                len_in_bytes = 2;
                break;
        case ARM_BREAKPOINT_LEN_4:
                len_in_bytes = 4;
                break;
        case ARM_BREAKPOINT_LEN_8:
                len_in_bytes = 8;
                break;
        }

        return len_in_bytes;
}
/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
        unsigned int len;
        unsigned long va;
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        va = info->address;
        len = get_hbp_len(info->ctrl.len);

        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
                           int *gen_len, int *gen_type)
{
        /* Type */
        switch (ctrl.type) {
        case ARM_BREAKPOINT_EXECUTE:
                *gen_type = HW_BREAKPOINT_X;
                break;
        case ARM_BREAKPOINT_LOAD:
                *gen_type = HW_BREAKPOINT_R;
                break;
        case ARM_BREAKPOINT_STORE:
                *gen_type = HW_BREAKPOINT_W;
                break;
        case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
                *gen_type = HW_BREAKPOINT_RW;
                break;
        default:
                return -EINVAL;
        }

        /* Len */
        switch (ctrl.len) {
        case ARM_BREAKPOINT_LEN_1:
                *gen_len = HW_BREAKPOINT_LEN_1;
                break;
        case ARM_BREAKPOINT_LEN_2:
                *gen_len = HW_BREAKPOINT_LEN_2;
                break;
        case ARM_BREAKPOINT_LEN_4:
                *gen_len = HW_BREAKPOINT_LEN_4;
                break;
        case ARM_BREAKPOINT_LEN_8:
                *gen_len = HW_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        /* Type */
        switch (bp->attr.bp_type) {
        case HW_BREAKPOINT_X:
                info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
                break;
        case HW_BREAKPOINT_R:
                info->ctrl.type = ARM_BREAKPOINT_LOAD;
                break;
        case HW_BREAKPOINT_W:
                info->ctrl.type = ARM_BREAKPOINT_STORE;
                break;
        case HW_BREAKPOINT_RW:
                info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
                break;
        default:
                return -EINVAL;
        }

        /* Len */
        switch (bp->attr.bp_len) {
        case HW_BREAKPOINT_LEN_1:
                info->ctrl.len = ARM_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
                info->ctrl.len = ARM_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_4:
                info->ctrl.len = ARM_BREAKPOINT_LEN_4;
                break;
        case HW_BREAKPOINT_LEN_8:
                info->ctrl.len = ARM_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }
        /*
         * On AArch64, we only permit breakpoints of length 4, whereas
         * AArch32 also requires breakpoints of length 2 for Thumb.
         * Watchpoints can be of length 1, 2, 4 or 8 bytes.
         */
        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                if (is_compat_bp(bp)) {
                        if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
                            info->ctrl.len != ARM_BREAKPOINT_LEN_4)
                                return -EINVAL;
                } else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
                        /*
                         * FIXME: Some tools (I'm looking at you perf) assume
                         *        that breakpoints should be sizeof(long). This
                         *        is nonsense. For now, we fix up the parameter
                         *        but we should probably return -EINVAL instead.
                         */
                        info->ctrl.len = ARM_BREAKPOINT_LEN_4;
                }
        }

        /* Address */
        info->address = bp->attr.bp_addr;

        /*
         * Privilege
         * Note that we disallow combined EL0/EL1 breakpoints because
         * that would complicate the stepping code.
         */
        if (arch_check_bp_in_kernelspace(bp))
                info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
        else
                info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;

        /* Enabled? */
        info->ctrl.enabled = !bp->attr.disabled;

        return 0;
}
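/*
 * A minimal in-kernel usage sketch (hypothetical names: watched_var,
 * wp_callback, tsk) that exercises this path through the generic
 * hw_breakpoint API:
 *
 *      struct perf_event_attr attr;
 *      struct perf_event *bp;
 *
 *      hw_breakpoint_init(&attr);
 *      attr.bp_addr = (unsigned long)&watched_var;
 *      attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *      attr.bp_type = HW_BREAKPOINT_W;
 *      bp = register_user_hw_breakpoint(&attr, wp_callback, NULL, tsk);
 *
 * register_user_hw_breakpoint() ends up calling arch_build_bp_info() and
 * the validation below before any debug register is touched.
 */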
/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        int ret;
        u64 alignment_mask, offset;

        /* Build the arch_hw_breakpoint. */
        ret = arch_build_bp_info(bp);
        if (ret)
                return ret;
        /*
         * Check address alignment.
         * We don't do any clever alignment correction for watchpoints
         * because using 64-bit unaligned addresses is deprecated for
         * AArch64.
         *
         * AArch32 tasks expect some simple alignment fixups, so emulate
         * that here.
         */
        if (is_compat_bp(bp)) {
                if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
                        alignment_mask = 0x7;
                else
                        alignment_mask = 0x3;
                offset = info->address & alignment_mask;
                switch (offset) {
                case 0:
                        /* Aligned */
                        break;
                case 1:
                        /* Allow single byte watchpoint. */
                        if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
                                break;
                case 2:
                        /* Allow halfword watchpoints and breakpoints. */
                        if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
                                break;
                default:
                        return -EINVAL;
                }

                info->address &= ~alignment_mask;
                info->ctrl.len <<= offset;
        } else {
                if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
                        alignment_mask = 0x3;
                else
                        alignment_mask = 0x7;
                if (info->address & alignment_mask)
                        return -EINVAL;
        }

        /*
         * Disallow per-task kernel breakpoints since these would
         * complicate the stepping code.
         */
        if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
                return -EINVAL;

        return 0;
}
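/*
 * Worked example of the compat fixup above: a 2-byte watchpoint requested
 * at 0x1006 gives offset = 0x1006 & 0x3 = 2, so the address is aligned
 * down to 0x1004 and the length mask is shifted into a byte address
 * select: ARM_BREAKPOINT_LEN_2 (0b11) << 2 = 0b1100, covering bytes 2-3
 * of the aligned word.
 */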
/*
 * Enable/disable all of the breakpoints active at the specified
 * exception level at the register level.
 * This is used when single-stepping after a breakpoint exception.
 */
static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
{
        int i, max_slots, privilege;
        u32 ctrl;
        struct perf_event **slots;

        switch (reg) {
        case AARCH64_DBG_REG_BCR:
                slots = this_cpu_ptr(bp_on_reg);
                max_slots = core_num_brps;
                break;
        case AARCH64_DBG_REG_WCR:
                slots = this_cpu_ptr(wp_on_reg);
                max_slots = core_num_wrps;
                break;
        default:
                return;
        }

        for (i = 0; i < max_slots; ++i) {
                if (!slots[i])
                        continue;

                privilege = counter_arch_bp(slots[i])->ctrl.privilege;
                if (debug_exception_level(privilege) != el)
                        continue;

                ctrl = read_wb_reg(reg, i);
                if (enable)
                        ctrl |= 0x1;
                else
                        ctrl &= ~0x1;
                write_wb_reg(reg, i, ctrl);
        }
}
NOKPROBE_SYMBOL(toggle_bp_registers);
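/*
 * For example, before stepping over a kernel breakpoint,
 * toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0) clears only
 * the enable bit of each matching slot, leaving the rest of the control
 * word intact so that re-enabling restores the exact previous state.
 */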
/*
 * Debug exception handlers.
 */
static int breakpoint_handler(unsigned long unused, unsigned int esr,
                              struct pt_regs *regs)
{
        int i, step = 0, *kernel_step;
        u32 ctrl_reg;
        u64 addr, val;
        struct perf_event *bp, **slots;
        struct debug_info *debug_info;
        struct arch_hw_breakpoint_ctrl ctrl;

        slots = this_cpu_ptr(bp_on_reg);
        addr = instruction_pointer(regs);
        debug_info = &current->thread.debug;

        for (i = 0; i < core_num_brps; ++i) {
                rcu_read_lock();

                bp = slots[i];

                if (bp == NULL)
                        goto unlock;

                /* Check if the breakpoint value matches. */
                val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
                if (val != (addr & ~0x3))
                        goto unlock;

                /* Possible match, check the byte address select to confirm. */
                ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
                decode_ctrl_reg(ctrl_reg, &ctrl);
                if (!((1 << (addr & 0x3)) & ctrl.len))
                        goto unlock;

                counter_arch_bp(bp)->trigger = addr;
                perf_bp_event(bp, regs);

                /* Do we need to handle the stepping? */
                if (is_default_overflow_handler(bp))
                        step = 1;
unlock:
                rcu_read_unlock();
        }

        if (!step)
                return 0;

        if (user_mode(regs)) {
                debug_info->bps_disabled = 1;
                toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);

                /* If we're already stepping a watchpoint, just return. */
                if (debug_info->wps_disabled)
                        return 0;

                if (test_thread_flag(TIF_SINGLESTEP))
                        debug_info->suspended_step = 1;
                else
                        user_enable_single_step(current);
        } else {
                toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
                kernel_step = this_cpu_ptr(&stepping_kernel_bp);

                if (*kernel_step != ARM_KERNEL_STEP_NONE)
                        return 0;

                if (kernel_active_single_step()) {
                        *kernel_step = ARM_KERNEL_STEP_SUSPEND;
                } else {
                        *kernel_step = ARM_KERNEL_STEP_ACTIVE;
                        kernel_enable_single_step(regs);
                }
        }

        return 0;
}
NOKPROBE_SYMBOL(breakpoint_handler);
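/*
 * Byte address select example for the handler above: a Thumb breakpoint
 * at 0x8002 is programmed as DBGBVR = 0x8000 with ctrl.len = 0b1100, so
 * a hit at addr = 0x8002 passes the check because
 * (1 << (0x8002 & 0x3)) & 0b1100 = 0b0100, which is non-zero.
 */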
static int watchpoint_handler(unsigned long addr, unsigned int esr,
                              struct pt_regs *regs)
{
        int i, step = 0, *kernel_step, access;
        u32 ctrl_reg;
        u64 val, alignment_mask;
        struct perf_event *wp, **slots;
        struct debug_info *debug_info;
        struct arch_hw_breakpoint *info;
        struct arch_hw_breakpoint_ctrl ctrl;

        slots = this_cpu_ptr(wp_on_reg);
        debug_info = &current->thread.debug;

        for (i = 0; i < core_num_wrps; ++i) {
                rcu_read_lock();

                wp = slots[i];

                if (wp == NULL)
                        goto unlock;

                info = counter_arch_bp(wp);
                /* AArch32 watchpoints are either 4 or 8 bytes aligned. */
                if (is_compat_task()) {
                        if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
                                alignment_mask = 0x7;
                        else
                                alignment_mask = 0x3;
                } else {
                        alignment_mask = 0x7;
                }

                /* Check if the watchpoint value matches. */
                val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
                if (val != (untagged_addr(addr) & ~alignment_mask))
                        goto unlock;

                /* Possible match, check the byte address select to confirm. */
                ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
                decode_ctrl_reg(ctrl_reg, &ctrl);
                if (!((1 << (addr & alignment_mask)) & ctrl.len))
                        goto unlock;

                /*
                 * Check that the access type matches.
                 * 0 => load, otherwise => store
                 */
                access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
                         HW_BREAKPOINT_R;
                if (!(access & hw_breakpoint_type(wp)))
                        goto unlock;

                info->trigger = addr;
                perf_bp_event(wp, regs);

                /* Do we need to handle the stepping? */
                if (is_default_overflow_handler(wp))
                        step = 1;

unlock:
                rcu_read_unlock();
        }

        if (!step)
                return 0;

        /*
         * We always disable EL0 watchpoints because the kernel can
         * cause these to fire via an unprivileged access.
         */
        toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);

        if (user_mode(regs)) {
                debug_info->wps_disabled = 1;

                /* If we're already stepping a breakpoint, just return. */
                if (debug_info->bps_disabled)
                        return 0;

                if (test_thread_flag(TIF_SINGLESTEP))
                        debug_info->suspended_step = 1;
                else
                        user_enable_single_step(current);
        } else {
                toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
                kernel_step = this_cpu_ptr(&stepping_kernel_bp);

                if (*kernel_step != ARM_KERNEL_STEP_NONE)
                        return 0;

                if (kernel_active_single_step()) {
                        *kernel_step = ARM_KERNEL_STEP_SUSPEND;
                } else {
                        *kernel_step = ARM_KERNEL_STEP_ACTIVE;
                        kernel_enable_single_step(regs);
                }
        }

        return 0;
}
NOKPROBE_SYMBOL(watchpoint_handler);
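/*
 * Note on the access check in the loop above: AARCH64_ESR_ACCESS_MASK
 * isolates the write-not-read bit of the ESR, so a store to a watched
 * address reports HW_BREAKPOINT_W and will not wake a read-only
 * (HW_BREAKPOINT_R) watchpoint, and vice versa.
 */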
/*
 * Handle single-step exception.
 */
int reinstall_suspended_bps(struct pt_regs *regs)
{
        struct debug_info *debug_info = &current->thread.debug;
        int handled_exception = 0, *kernel_step;

        kernel_step = this_cpu_ptr(&stepping_kernel_bp);

        /*
         * Called from single-step exception handler.
         * Return 0 if execution can resume, 1 if a SIGTRAP should be
         * reported.
         */
        if (user_mode(regs)) {
                if (debug_info->bps_disabled) {
                        debug_info->bps_disabled = 0;
                        toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
                        handled_exception = 1;
                }

                if (debug_info->wps_disabled) {
                        debug_info->wps_disabled = 0;
                        toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
                        handled_exception = 1;
                }

                if (handled_exception) {
                        if (debug_info->suspended_step) {
                                debug_info->suspended_step = 0;
                                /* Allow exception handling to fall-through. */
                                handled_exception = 0;
                        } else {
                                user_disable_single_step(current);
                        }
                }
        } else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
                toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
                toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);

                if (!debug_info->wps_disabled)
                        toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);

                if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
                        kernel_disable_single_step();
                        handled_exception = 1;
                } else {
                        handled_exception = 0;
                }

                *kernel_step = ARM_KERNEL_STEP_NONE;
        }

        return !handled_exception;
}
NOKPROBE_SYMBOL(reinstall_suspended_bps);
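/*
 * Putting the three handlers together, the stepping sequence is:
 * a breakpoint/watchpoint fires -> its registers are disabled at that
 * exception level -> single-step is enabled -> the faulting instruction
 * retires -> the step exception lands in reinstall_suspended_bps(),
 * which re-arms the registers and tears the step state back down.
 */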
/*
 * Context-switcher for restoring suspended breakpoints.
 */
void hw_breakpoint_thread_switch(struct task_struct *next)
{
        /*
         *           current        next
         * disabled: 0              0     => The usual case, NOTIFY_DONE
         *           0              1     => Disable the registers
         *           1              0     => Enable the registers
         *           1              1     => NOTIFY_DONE. per-task bps will
         *                                   get taken care of by perf.
         */

        struct debug_info *current_debug_info, *next_debug_info;

        current_debug_info = &current->thread.debug;
        next_debug_info = &next->thread.debug;

        /* Update breakpoints. */
        if (current_debug_info->bps_disabled != next_debug_info->bps_disabled)
                toggle_bp_registers(AARCH64_DBG_REG_BCR,
                                    DBG_ACTIVE_EL0,
                                    !next_debug_info->bps_disabled);

        /* Update watchpoints. */
        if (current_debug_info->wps_disabled != next_debug_info->wps_disabled)
                toggle_bp_registers(AARCH64_DBG_REG_WCR,
                                    DBG_ACTIVE_EL0,
                                    !next_debug_info->wps_disabled);
}
/*
 * CPU initialisation.
 */
static int hw_breakpoint_reset(unsigned int cpu)
{
        int i;
        struct perf_event **slots;
        /*
         * When a CPU goes through cold-boot, it does not have any installed
         * slot, so it is safe to share the same function for restoring and
         * resetting breakpoints; when a CPU is hotplugged in, it goes
         * through the slots, which are all empty, hence it just resets control
         * and value for debug registers.
         * When this function is triggered on warm-boot through a CPU PM
         * notifier some slots might be initialized; if so they are
         * reprogrammed according to the debug slots content.
         */
        for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
                if (slots[i]) {
                        hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
                } else {
                        write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
                        write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
                }
        }

        for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
                if (slots[i]) {
                        hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
                } else {
                        write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
                        write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
                }
        }

        return 0;
}
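/*
 * Sketch of a per-CPU (wide) breakpoint that relies on this reset path
 * surviving hotplug and PM transitions; names are hypothetical and attr
 * is prepared as in the earlier register_user_hw_breakpoint() sketch:
 *
 *      struct perf_event * __percpu *bps;
 *
 *      bps = register_wide_hw_breakpoint(&attr, wp_callback, NULL);
 *      ...
 *      unregister_wide_hw_breakpoint(bps);
 */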
#ifdef CONFIG_CPU_PM
extern void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int));
#else
static inline void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int))
{
}
#endif
/*
 * One-time initialisation.
 */
static int __init arch_hw_breakpoint_init(void)
{
        int ret;

        core_num_brps = get_num_brps();
        core_num_wrps = get_num_wrps();

        pr_info("found %d breakpoint and %d watchpoint registers.\n",
                core_num_brps, core_num_wrps);

        /* Register debug fault handlers. */
        hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
                              TRAP_HWBKPT, "hw-breakpoint handler");
        hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
                              TRAP_HWBKPT, "hw-watchpoint handler");

        /*
         * Reset the breakpoint resources. We assume that a halting
         * debugger will leave the world in a nice state for us.
         */
        ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
                                "CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING",
                                hw_breakpoint_reset, NULL);
        if (ret)
                pr_err("failed to register CPU hotplug notifier: %d\n", ret);

        /* Register cpu_suspend hw breakpoint restore hook */
        cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);

        return ret;
}
arch_initcall(arch_hw_breakpoint_init);
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                    unsigned long val, void *data)
{
        return NOTIFY_DONE;
}