/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/smp.h>

#include <asm/compat.h>
#include <asm/current.h>
#include <asm/debug-monitors.h>
#include <asm/hw_breakpoint.h>
#include <asm/kdebug.h>
#include <asm/traps.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Currently stepping a per-CPU kernel breakpoint. */
static DEFINE_PER_CPU(int, stepping_kernel_bp);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;

/* Determine number of BRP registers available. */
static int get_num_brps(void)
{
	return ((read_cpuid(ID_AA64DFR0_EL1) >> 12) & 0xf) + 1;
}

/* Determine number of WRP registers available. */
static int get_num_wrps(void)
{
	return ((read_cpuid(ID_AA64DFR0_EL1) >> 20) & 0xf) + 1;
}
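
/*
 * Worked example (illustrative, not part of the original source):
 * ID_AA64DFR0_EL1 encodes BRPs in bits [15:12] and WRPs in bits [23:20],
 * each holding "number of registers minus one". A CPU reporting BRPs == 5
 * therefore gives get_num_brps() == 6, i.e. the register pairs
 * DBGBVR0_EL1..DBGBVR5_EL1 / DBGBCR0_EL1..DBGBCR5_EL1 are implemented.
 */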

int hw_breakpoint_slots(int type)
{
	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warning("unknown slot type: %d\n", type);
		return 0;
	}
}

#define READ_WB_REG_CASE(OFF, N, REG, VAL)	\
	case (OFF + N):				\
		AARCH64_DBG_READ(N, REG, VAL);	\
		break

#define WRITE_WB_REG_CASE(OFF, N, REG, VAL)	\
	case (OFF + N):				\
		AARCH64_DBG_WRITE(N, REG, VAL);	\
		break

#define GEN_READ_WB_REG_CASES(OFF, REG, VAL)	\
	READ_WB_REG_CASE(OFF,  0, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  1, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  2, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  3, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  4, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  5, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  6, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  7, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  8, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  9, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 10, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 11, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 12, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 13, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 14, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 15, REG, VAL)

#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL)	\
	WRITE_WB_REG_CASE(OFF,  0, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  1, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  2, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  3, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  4, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  5, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  6, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  7, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  8, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  9, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 10, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 11, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 12, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 13, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 14, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 15, REG, VAL)

static u64 read_wb_reg(int reg, int n)
{
	u64 val = 0;

	switch (reg + n) {
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
	default:
		pr_warning("attempt to read from unknown breakpoint register %d\n", n);
	}

	return val;
}

static void write_wb_reg(int reg, int n, u64 val)
{
	switch (reg + n) {
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
	default:
		pr_warning("attempt to write to unknown breakpoint register %d\n", n);
	}
	isb();
}
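
/*
 * Illustrative note (not from the original source): the switch in
 * read_wb_reg()/write_wb_reg() dispatches on (reg + n), so a call such as
 * read_wb_reg(AARCH64_DBG_REG_BVR, 2) selects the generated "case (OFF + 2)"
 * above and expands to AARCH64_DBG_READ(2, AARCH64_DBG_REG_NAME_BVR, val),
 * i.e. a read of hardware register DBGBVR2_EL1.
 */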

/*
 * Convert a breakpoint privilege level to the corresponding exception
 * level.
 */
static enum debug_el debug_exception_level(int privilege)
{
	switch (privilege) {
	case AARCH64_BREAKPOINT_EL0:
		return DBG_ACTIVE_EL0;
	case AARCH64_BREAKPOINT_EL1:
		return DBG_ACTIVE_EL1;
	default:
		pr_warning("invalid breakpoint privilege level %d\n", privilege);
		return -EINVAL;
	}
}

enum hw_breakpoint_ops {
	HW_BREAKPOINT_INSTALL,
	HW_BREAKPOINT_UNINSTALL,
	HW_BREAKPOINT_RESTORE
};

/**
 * hw_breakpoint_slot_setup - Find and setup a perf slot according to
 *			      operations
 *
 * @slots: pointer to array of slots
 * @max_slots: max number of slots
 * @bp: perf_event to setup
 * @ops: operation to be carried out on the slot
 *
 * Return:
 *	slot index on success
 *	-ENOSPC if no slot is available/matches
 *	-EINVAL on wrong operations parameter
 */
static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
				    struct perf_event *bp,
				    enum hw_breakpoint_ops ops)
{
	int i;
	struct perf_event **slot;

	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];
		switch (ops) {
		case HW_BREAKPOINT_INSTALL:
			if (!*slot) {
				*slot = bp;
				return i;
			}
			break;
		case HW_BREAKPOINT_UNINSTALL:
			if (*slot == bp) {
				*slot = NULL;
				return i;
			}
			break;
		case HW_BREAKPOINT_RESTORE:
			if (*slot == bp)
				return i;
			break;
		default:
			pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
			return -EINVAL;
		}
	}
	return -ENOSPC;
}
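
/*
 * Usage sketch (illustrative): HW_BREAKPOINT_INSTALL claims the first
 * empty slot, so successive installs fill slots 0, 1, 2, ... until
 * max_slots is reached and -ENOSPC is returned; UNINSTALL and RESTORE
 * instead look for the slot already holding @bp.
 */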

static int hw_breakpoint_control(struct perf_event *bp,
				 enum hw_breakpoint_ops ops)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slots;
	struct debug_info *debug_info = &current->thread.debug;
	int i, max_slots, ctrl_reg, val_reg, reg_enable;
	enum debug_el dbg_el = debug_exception_level(info->ctrl.privilege);
	u32 ctrl;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_reg = AARCH64_DBG_REG_BCR;
		val_reg = AARCH64_DBG_REG_BVR;
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
		reg_enable = !debug_info->bps_disabled;
	} else {
		/* Watchpoint */
		ctrl_reg = AARCH64_DBG_REG_WCR;
		val_reg = AARCH64_DBG_REG_WVR;
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
		reg_enable = !debug_info->wps_disabled;
	}

	i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);

	if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
		return i;

	switch (ops) {
	case HW_BREAKPOINT_INSTALL:
		/*
		 * Ensure debug monitors are enabled at the correct exception
		 * level.
		 */
		enable_debug_monitors(dbg_el);
		/* Fall through */
	case HW_BREAKPOINT_RESTORE:
		/* Setup the address register. */
		write_wb_reg(val_reg, i, info->address);

		/* Setup the control register. */
		ctrl = encode_ctrl_reg(info->ctrl);
		write_wb_reg(ctrl_reg, i,
			     reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
		break;
	case HW_BREAKPOINT_UNINSTALL:
		/* Reset the control register. */
		write_wb_reg(ctrl_reg, i, 0);

		/*
		 * Release the debug monitors for the correct exception
		 * level.
		 */
		disable_debug_monitors(dbg_el);
		break;
	}

	return 0;
}
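
/*
 * Illustrative note (not from the original source): bit 0 of the encoded
 * BCR/WCR value is the enable bit, so writing "ctrl | 0x1" arms the slot
 * while "ctrl & ~0x1" programs it installed but quiescent, e.g. while the
 * owning thread has its breakpoints suspended for single-stepping.
 */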

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}

static int get_hbp_len(u8 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case ARM_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->ctrl.len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
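
/*
 * Worked example (illustrative): a breakpoint only counts as a kernel
 * breakpoint if both its first byte (va) and its last byte (va + len - 1)
 * lie at or above TASK_SIZE; a watchpoint starting below TASK_SIZE is
 * handled as a user watchpoint.
 */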

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
			   int *gen_len, int *gen_type)
{
	/* Type */
	switch (ctrl.type) {
	case ARM_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case ARM_BREAKPOINT_LOAD:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (ctrl.len) {
	case ARM_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_X:
		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		info->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		info->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * On AArch64, we only permit breakpoints of length 4, whereas
	 * AArch32 also requires breakpoints of length 2 for Thumb.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes.
	 */
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		if (is_compat_task()) {
			if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
			    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
				return -EINVAL;
		} else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
			/*
			 * FIXME: Some tools (I'm looking at you perf) assume
			 *	  that breakpoints should be sizeof(long). This
			 *	  is nonsense. For now, we fix up the parameter
			 *	  but we should probably return -EINVAL instead.
			 */
			info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		}
	}

	/* Address */
	info->address = bp->attr.bp_addr;

	/*
	 * Privilege
	 * Note that we disallow combined EL0/EL1 breakpoints because
	 * that would complicate the stepping code.
	 */
	if (arch_check_bp_in_kernelspace(bp))
		info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
	else
		info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;

	/* Enabled? */
	info->ctrl.enabled = !bp->attr.disabled;

	return 0;
}
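
/*
 * Usage sketch (illustrative, hypothetical values): a perf_event_attr
 * with bp_type = HW_BREAKPOINT_W, bp_len = HW_BREAKPOINT_LEN_4 and
 * bp_addr = 0x1000 yields ctrl.type = ARM_BREAKPOINT_STORE,
 * ctrl.len = ARM_BREAKPOINT_LEN_4 and address = 0x1000, with the
 * privilege level chosen by arch_check_bp_in_kernelspace().
 */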

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int ret;
	u64 alignment_mask, offset;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	/*
	 * Check address alignment.
	 * We don't do any clever alignment correction for watchpoints
	 * because using 64-bit unaligned addresses is deprecated for
	 * AArch64.
	 *
	 * AArch32 tasks expect some simple alignment fixups, so emulate
	 * that here.
	 */
	if (is_compat_task()) {
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
			alignment_mask = 0x7;
		else
			alignment_mask = 0x3;
		offset = info->address & alignment_mask;
		switch (offset) {
		case 0:
			/* Aligned */
			break;
		case 1:
			/* Allow single byte watchpoint. */
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
				break;
		case 2:
			/* Allow halfword watchpoints and breakpoints. */
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
				break;
		default:
			return -EINVAL;
		}

		info->address &= ~alignment_mask;
		info->ctrl.len <<= offset;
	} else {
		if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
			alignment_mask = 0x3;
		else
			alignment_mask = 0x7;
		if (info->address & alignment_mask)
			return -EINVAL;
	}

	/*
	 * Disallow per-task kernel breakpoints since these would
	 * complicate the stepping code.
	 */
	if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.bp_target)
		return -EINVAL;

	return 0;
}
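
/*
 * Worked example (illustrative): a compat task requesting a 1-byte
 * watchpoint at 0x1001 has offset == 1, so the address is rounded down
 * to 0x1000 and the byte-address-select held in ctrl.len is shifted from
 * 0b0001 to 0b0010, selecting byte 1 within the aligned word.
 */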

/*
 * Enable/disable all of the breakpoints active at the specified
 * exception level at the register level.
 * This is used when single-stepping after a breakpoint exception.
 */
static void toggle_bp_registers(int reg, enum debug_el el, int enable)
{
	int i, max_slots, privilege;
	u32 ctrl;
	struct perf_event **slots;

	switch (reg) {
	case AARCH64_DBG_REG_BCR:
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
		break;
	case AARCH64_DBG_REG_WCR:
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
		break;
	default:
		return;
	}

	for (i = 0; i < max_slots; ++i) {
		if (!slots[i])
			continue;

		privilege = counter_arch_bp(slots[i])->ctrl.privilege;
		if (debug_exception_level(privilege) != el)
			continue;

		ctrl = read_wb_reg(reg, i);
		if (enable)
			ctrl |= 0x1;
		else
			ctrl &= ~0x1;
		write_wb_reg(reg, i, ctrl);
	}
}

/*
 * Debug exception handlers.
 */
static int breakpoint_handler(unsigned long unused, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step;
	u32 ctrl_reg;
	u64 addr, val;
	struct perf_event *bp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(bp_on_reg);
	addr = instruction_pointer(regs);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
		if (val != (addr & ~0x3))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & 0x3)) & ctrl.len))
			goto unlock;

		counter_arch_bp(bp)->trigger = addr;
		perf_bp_event(bp, regs);

		/* Do we need to handle the stepping? */
		if (!bp->overflow_handler)
			step = 1;

unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	if (user_mode(regs)) {
		debug_info->bps_disabled = 1;
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);

		/* If we're already stepping a watchpoint, just return. */
		if (debug_info->wps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}
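
/*
 * Illustrative note (not from the original source): the BVR comparison
 * masks off addr[1:0] because breakpoint values are word-aligned; the
 * decoded byte-address-select in ctrl.len then identifies which halfword
 * within the word actually fired, which is how a length-2 (Thumb)
 * breakpoint at a word offset of 2 is matched.
 */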

static int watchpoint_handler(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step, access;
	u32 ctrl_reg;
	u64 val, alignment_mask;
	struct perf_event *wp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(wp_on_reg);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		/* AArch32 watchpoints are either 4 or 8 bytes aligned. */
		if (is_compat_task()) {
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
				alignment_mask = 0x7;
			else
				alignment_mask = 0x3;
		} else {
			alignment_mask = 0x7;
		}

		/* Check if the watchpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
		if (val != (addr & ~alignment_mask))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & alignment_mask)) & ctrl.len))
			goto unlock;

		/*
		 * Check that the access type matches.
		 * 0 => load, otherwise => store
		 */
		access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
			 HW_BREAKPOINT_R;
		if (!(access & hw_breakpoint_type(wp)))
			goto unlock;

		info->trigger = addr;
		perf_bp_event(wp, regs);

		/* Do we need to handle the stepping? */
		if (!wp->overflow_handler)
			step = 1;

unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	/*
	 * We always disable EL0 watchpoints because the kernel can
	 * cause these to fire via an unprivileged access.
	 */
	toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);

	if (user_mode(regs)) {
		debug_info->wps_disabled = 1;

		/* If we're already stepping a breakpoint, just return. */
		if (debug_info->bps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}
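
/*
 * Illustrative note: AARCH64_ESR_ACCESS_MASK isolates the write/not-read
 * bit of the ESR, so a load-only watchpoint ignores store exceptions and
 * vice versa; HW_BREAKPOINT_RW events accept either access type.
 */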

/*
 * Handle single-step exception.
 */
int reinstall_suspended_bps(struct pt_regs *regs)
{
	struct debug_info *debug_info = &current->thread.debug;
	int handled_exception = 0, *kernel_step;

	kernel_step = this_cpu_ptr(&stepping_kernel_bp);

	/*
	 * Called from single-step exception handler.
	 * Return 0 if execution can resume, 1 if a SIGTRAP should be
	 * reported.
	 */
	if (user_mode(regs)) {
		if (debug_info->bps_disabled) {
			debug_info->bps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
			handled_exception = 1;
		}

		if (debug_info->wps_disabled) {
			debug_info->wps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
			handled_exception = 1;
		}

		if (handled_exception) {
			if (debug_info->suspended_step) {
				debug_info->suspended_step = 0;
				/* Allow exception handling to fall-through. */
				handled_exception = 0;
			} else {
				user_disable_single_step(current);
			}
		}
	} else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);

		if (!debug_info->wps_disabled)
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);

		if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
			kernel_disable_single_step();
			handled_exception = 1;
		} else {
			handled_exception = 0;
		}

		*kernel_step = ARM_KERNEL_STEP_NONE;
	}

	return !handled_exception;
}

/*
 * Context-switcher for restoring suspended breakpoints.
 */
void hw_breakpoint_thread_switch(struct task_struct *next)
{
	/*
	 *           current        next
	 * disabled: 0              0     => The usual case, NOTIFY_DONE
	 *           0              1     => Disable the registers
	 *           1              0     => Enable the registers
	 *           1              1     => NOTIFY_DONE. per-task bps will
	 *                                   get taken care of by perf.
	 */

	struct debug_info *current_debug_info, *next_debug_info;

	current_debug_info = &current->thread.debug;
	next_debug_info = &next->thread.debug;

	/* Update breakpoints. */
	if (current_debug_info->bps_disabled != next_debug_info->bps_disabled)
		toggle_bp_registers(AARCH64_DBG_REG_BCR,
				    DBG_ACTIVE_EL0,
				    !next_debug_info->bps_disabled);

	/* Update watchpoints. */
	if (current_debug_info->wps_disabled != next_debug_info->wps_disabled)
		toggle_bp_registers(AARCH64_DBG_REG_WCR,
				    DBG_ACTIVE_EL0,
				    !next_debug_info->wps_disabled);
}

/*
 * CPU initialisation.
 */
static void hw_breakpoint_reset(void *unused)
{
	int i;
	struct perf_event **slots;
	/*
	 * When a CPU goes through cold-boot, it does not have any installed
	 * slot, so it is safe to share the same function for restoring and
	 * resetting breakpoints; when a CPU is hotplugged in, it goes
	 * through the slots, which are all empty, hence it just resets control
	 * and value for debug registers.
	 * When this function is triggered on warm-boot through a CPU PM
	 * notifier some slots might be initialized; if so they are
	 * reprogrammed according to the debug slots content.
	 */
	for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
		if (slots[i]) {
			hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
		} else {
			write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
			write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
		}
	}

	for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
		if (slots[i]) {
			hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
		} else {
			write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
			write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
		}
	}
}

static int hw_breakpoint_reset_notify(struct notifier_block *self,
				      unsigned long action,
				      void *hcpu)
{
	int cpu = (long)hcpu;
	if (action == CPU_ONLINE)
		smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block hw_breakpoint_reset_nb = {
	.notifier_call = hw_breakpoint_reset_notify,
};

#ifdef CONFIG_ARM64_CPU_SUSPEND
extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
#else
static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
{
}
#endif

/*
 * One-time initialisation.
 */
static int __init arch_hw_breakpoint_init(void)
{
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	pr_info("found %d breakpoint and %d watchpoint registers.\n",
		core_num_brps, core_num_wrps);

	cpu_notifier_register_begin();

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
	smp_call_function(hw_breakpoint_reset, NULL, 1);
	hw_breakpoint_reset(NULL);

	/* Register debug fault handlers. */
	hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-breakpoint handler");
	hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-watchpoint handler");

	/* Register hotplug notifier. */
	__register_cpu_notifier(&hw_breakpoint_reset_nb);

	cpu_notifier_register_done();

	/* Register cpu_suspend hw breakpoint restore hook */
	cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);

	return 0;
}
arch_initcall(arch_hw_breakpoint_init);
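
/*
 * Illustrative note: at init time, smp_call_function() resets the debug
 * registers on all other online CPUs while hw_breakpoint_reset(NULL)
 * handles the calling CPU, so every CPU is in a known state before the
 * debug fault hooks are registered.
 */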

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}