/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/compat.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/smp.h>

#include <asm/compat.h>
#include <asm/current.h>
#include <asm/debug-monitors.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Currently stepping a per-CPU kernel breakpoint. */
static DEFINE_PER_CPU(int, stepping_kernel_bp);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;
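
/*
 * hw_breakpoint_slots() can run before arch_hw_breakpoint_init() has cached
 * the counts above; that is safe because get_num_brps() and get_num_wrps()
 * (asm/hw_breakpoint.h) read the slot counts directly from the CPU's debug
 * ID register rather than from core_num_brps/core_num_wrps.
 */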
int hw_breakpoint_slots(int type)
{
        /*
         * We can be called early, so don't rely on
         * our static variables being initialised.
         */
        switch (type) {
        case TYPE_INST:
                return get_num_brps();
        case TYPE_DATA:
                return get_num_wrps();
        default:
                pr_warning("unknown slot type: %d\n", type);
                return 0;
        }
}

#define READ_WB_REG_CASE(OFF, N, REG, VAL)      \
        case (OFF + N):                         \
                AARCH64_DBG_READ(N, REG, VAL);  \
                break

#define WRITE_WB_REG_CASE(OFF, N, REG, VAL)     \
        case (OFF + N):                         \
                AARCH64_DBG_WRITE(N, REG, VAL); \
                break

#define GEN_READ_WB_REG_CASES(OFF, REG, VAL)    \
        READ_WB_REG_CASE(OFF,  0, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  1, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  2, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  3, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  4, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  5, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  6, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  7, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  8, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  9, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 10, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 11, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 12, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 13, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 14, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 15, REG, VAL)

#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL)   \
        WRITE_WB_REG_CASE(OFF,  0, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  1, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  2, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  3, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  4, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  5, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  6, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  7, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  8, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  9, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 10, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 11, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 12, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 13, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 14, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 15, REG, VAL)
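
/*
 * The debug system registers are not runtime-indexable, so read_wb_reg() and
 * write_wb_reg() below dispatch on (base + n) through the generated cases.
 * As a sketch, GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR,
 * AARCH64_DBG_REG_NAME_BVR, val) expands to sixteen cases of the form:
 *
 *      case (AARCH64_DBG_REG_BVR + 2):
 *              AARCH64_DBG_READ(2, AARCH64_DBG_REG_NAME_BVR, val);
 *              break;
 *
 * i.e. one direct accessor per numbered DBGBVR<n>_EL1 register.
 */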
static u64 read_wb_reg(int reg, int n)
{
        u64 val = 0;

        switch (reg + n) {
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
        default:
                pr_warning("attempt to read from unknown breakpoint register %d\n", n);
        }

        return val;
}

static void write_wb_reg(int reg, int n, u64 val)
{
        switch (reg + n) {
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
        default:
                pr_warning("attempt to write to unknown breakpoint register %d\n", n);
        }
        isb();
}

/*
 * Convert a breakpoint privilege level to the corresponding exception
 * level.
 */
static enum dbg_active_el debug_exception_level(int privilege)
{
        switch (privilege) {
        case AARCH64_BREAKPOINT_EL0:
                return DBG_ACTIVE_EL0;
        case AARCH64_BREAKPOINT_EL1:
                return DBG_ACTIVE_EL1;
        default:
                pr_warning("invalid breakpoint privilege level %d\n", privilege);
                return -EINVAL;
        }
}

enum hw_breakpoint_ops {
        HW_BREAKPOINT_INSTALL,
        HW_BREAKPOINT_UNINSTALL,
        HW_BREAKPOINT_RESTORE
};

static int is_compat_bp(struct perf_event *bp)
{
        struct task_struct *tsk = bp->hw.target;

        /*
         * tsk can be NULL for per-cpu (non-ptrace) breakpoints.
         * In this case, use the native interface, since we don't have
         * the notion of a "compat CPU" and could end up relying on
         * deprecated behaviour if we use unaligned watchpoints in
         * AArch64 state.
         */
        return tsk && is_compat_thread(task_thread_info(tsk));
}

/**
 * hw_breakpoint_slot_setup - Find and setup a perf slot according to
 *                            operations
 *
 * @slots: pointer to array of slots
 * @max_slots: max number of slots
 * @bp: perf_event to setup
 * @ops: operation to be carried out on the slot
 *
 * Return:
 *      slot index on success
 *      -ENOSPC if no slot is available/matches
 *      -EINVAL on wrong operations parameter
 */
static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
                                    struct perf_event *bp,
                                    enum hw_breakpoint_ops ops)
{
        int i;
        struct perf_event **slot;

        for (i = 0; i < max_slots; ++i) {
                slot = &slots[i];
                switch (ops) {
                case HW_BREAKPOINT_INSTALL:
                        if (!*slot) {
                                *slot = bp;
                                return i;
                        }
                        break;
                case HW_BREAKPOINT_UNINSTALL:
                        if (*slot == bp) {
                                *slot = NULL;
                                return i;
                        }
                        break;
                case HW_BREAKPOINT_RESTORE:
                        if (*slot == bp)
                                return i;
                        break;
                default:
                        pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
                        return -EINVAL;
                }
        }

        return -ENOSPC;
}
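
/*
 * Program (or clear) the debug register pair backing @bp. For INSTALL and
 * RESTORE we write the address into the value register and the encoded
 * control word into the control register; bit 0 of the control word is the
 * enable bit, which is masked off while the corresponding breakpoint class
 * is suspended for single-stepping (reg_enable below).
 */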
static int hw_breakpoint_control(struct perf_event *bp,
                                 enum hw_breakpoint_ops ops)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        struct perf_event **slots;
        struct debug_info *debug_info = &current->thread.debug;
        int i, max_slots, ctrl_reg, val_reg, reg_enable;
        enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
        u32 ctrl;

        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                /* Breakpoint */
                ctrl_reg = AARCH64_DBG_REG_BCR;
                val_reg = AARCH64_DBG_REG_BVR;
                slots = this_cpu_ptr(bp_on_reg);
                max_slots = core_num_brps;
                reg_enable = !debug_info->bps_disabled;
        } else {
                /* Watchpoint */
                ctrl_reg = AARCH64_DBG_REG_WCR;
                val_reg = AARCH64_DBG_REG_WVR;
                slots = this_cpu_ptr(wp_on_reg);
                max_slots = core_num_wrps;
                reg_enable = !debug_info->wps_disabled;
        }

        i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);

        if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
                return i;

        switch (ops) {
        case HW_BREAKPOINT_INSTALL:
                /*
                 * Ensure debug monitors are enabled at the correct exception
                 * level.
                 */
                enable_debug_monitors(dbg_el);
                /* Fall through */
        case HW_BREAKPOINT_RESTORE:
                /* Setup the address register. */
                write_wb_reg(val_reg, i, info->address);

                /* Setup the control register. */
                ctrl = encode_ctrl_reg(info->ctrl);
                write_wb_reg(ctrl_reg, i,
                             reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
                break;
        case HW_BREAKPOINT_UNINSTALL:
                /* Reset the control register. */
                write_wb_reg(ctrl_reg, i, 0);

                /*
                 * Release the debug monitors for the correct exception
                 * level.
                 */
                disable_debug_monitors(dbg_el);
                break;
        }

        return 0;
}

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
        return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
        hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}

static int get_hbp_len(u8 hbp_len)
{
        unsigned int len_in_bytes = 0;

        switch (hbp_len) {
        case ARM_BREAKPOINT_LEN_1:
                len_in_bytes = 1;
                break;
        case ARM_BREAKPOINT_LEN_2:
                len_in_bytes = 2;
                break;
        case ARM_BREAKPOINT_LEN_4:
                len_in_bytes = 4;
                break;
        case ARM_BREAKPOINT_LEN_8:
                len_in_bytes = 8;
                break;
        }

        return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
        unsigned int len;
        unsigned long va;
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        va = info->address;
        len = get_hbp_len(info->ctrl.len);

        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
                           int *gen_len, int *gen_type)
{
        /* Type */
        switch (ctrl.type) {
        case ARM_BREAKPOINT_EXECUTE:
                *gen_type = HW_BREAKPOINT_X;
                break;
        case ARM_BREAKPOINT_LOAD:
                *gen_type = HW_BREAKPOINT_R;
                break;
        case ARM_BREAKPOINT_STORE:
                *gen_type = HW_BREAKPOINT_W;
                break;
        case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
                *gen_type = HW_BREAKPOINT_RW;
                break;
        default:
                return -EINVAL;
        }

        /* Len */
        switch (ctrl.len) {
        case ARM_BREAKPOINT_LEN_1:
                *gen_len = HW_BREAKPOINT_LEN_1;
                break;
        case ARM_BREAKPOINT_LEN_2:
                *gen_len = HW_BREAKPOINT_LEN_2;
                break;
        case ARM_BREAKPOINT_LEN_4:
                *gen_len = HW_BREAKPOINT_LEN_4;
                break;
        case ARM_BREAKPOINT_LEN_8:
                *gen_len = HW_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        /* Type */
        switch (bp->attr.bp_type) {
        case HW_BREAKPOINT_X:
                info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
                break;
        case HW_BREAKPOINT_R:
                info->ctrl.type = ARM_BREAKPOINT_LOAD;
                break;
        case HW_BREAKPOINT_W:
                info->ctrl.type = ARM_BREAKPOINT_STORE;
                break;
        case HW_BREAKPOINT_RW:
                info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
                break;
        default:
                return -EINVAL;
        }

        /* Len */
        switch (bp->attr.bp_len) {
        case HW_BREAKPOINT_LEN_1:
                info->ctrl.len = ARM_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
                info->ctrl.len = ARM_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_4:
                info->ctrl.len = ARM_BREAKPOINT_LEN_4;
                break;
        case HW_BREAKPOINT_LEN_8:
                info->ctrl.len = ARM_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        /*
         * On AArch64, we only permit breakpoints of length 4, whereas
         * AArch32 also requires breakpoints of length 2 for Thumb.
         * Watchpoints can be of length 1, 2, 4 or 8 bytes.
         */
        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                if (is_compat_bp(bp)) {
                        if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
                            info->ctrl.len != ARM_BREAKPOINT_LEN_4)
                                return -EINVAL;
                } else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
                        /*
                         * FIXME: Some tools (I'm looking at you perf) assume
                         *        that breakpoints should be sizeof(long). This
                         *        is nonsense. For now, we fix up the parameter
                         *        but we should probably return -EINVAL instead.
                         */
                        info->ctrl.len = ARM_BREAKPOINT_LEN_4;
                }
        }

        /* Address */
        info->address = bp->attr.bp_addr;

        /*
         * Privilege
         * Note that we disallow combined EL0/EL1 breakpoints because
         * that would complicate the stepping code.
         */
        if (arch_check_bp_in_kernelspace(bp))
                info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
        else
                info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;

        /* Enabled? */
        info->ctrl.enabled = !bp->attr.disabled;

        return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        int ret;
        u64 alignment_mask, offset;

        /* Build the arch_hw_breakpoint. */
        ret = arch_build_bp_info(bp);
        if (ret)
                return ret;

        /*
         * Check address alignment.
         * We don't do any clever alignment correction for watchpoints
         * because using 64-bit unaligned addresses is deprecated for
         * AArch64.
         *
         * AArch32 tasks expect some simple alignment fixups, so emulate
         * that here.
         */
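        /*
         * As a worked example (hypothetical values): a 2-byte compat
         * watchpoint on address 0x1002 has offset 2, so the fixup below
         * aligns the address down to 0x1000 and shifts the length field
         * left by 2, yielding a byte-address-select mask that covers
         * bytes 2-3 of the watched word.
         */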
        if (is_compat_bp(bp)) {
                if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
                        alignment_mask = 0x7;
                else
                        alignment_mask = 0x3;
                offset = info->address & alignment_mask;
                switch (offset) {
                case 0:
                        /* Aligned */
                        break;
                case 1:
                        /* Allow single byte watchpoint. */
                        if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
                                break;
                case 2:
                        /* Allow halfword watchpoints and breakpoints. */
                        if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
                                break;
                default:
                        return -EINVAL;
                }

                info->address &= ~alignment_mask;
                info->ctrl.len <<= offset;
        } else {
                if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
                        alignment_mask = 0x3;
                else
                        alignment_mask = 0x7;
                if (info->address & alignment_mask)
                        return -EINVAL;
        }

        /*
         * Disallow per-task kernel breakpoints since these would
         * complicate the stepping code.
         */
        if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
                return -EINVAL;

        return 0;
}

/*
 * Enable/disable all of the breakpoints active at the specified
 * exception level at the register level.
 * This is used when single-stepping after a breakpoint exception.
 */
static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
{
        int i, max_slots, privilege;
        u32 ctrl;
        struct perf_event **slots;

        switch (reg) {
        case AARCH64_DBG_REG_BCR:
                slots = this_cpu_ptr(bp_on_reg);
                max_slots = core_num_brps;
                break;
        case AARCH64_DBG_REG_WCR:
                slots = this_cpu_ptr(wp_on_reg);
                max_slots = core_num_wrps;
                break;
        default:
                return;
        }

        for (i = 0; i < max_slots; ++i) {
                if (!slots[i])
                        continue;

                privilege = counter_arch_bp(slots[i])->ctrl.privilege;
                if (debug_exception_level(privilege) != el)
                        continue;

                ctrl = read_wb_reg(reg, i);
                if (enable)
                        ctrl |= 0x1;
                else
                        ctrl &= ~0x1;
                write_wb_reg(reg, i, ctrl);
        }
}
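
/*
 * Stepping works as follows: the handlers below disable the matching
 * breakpoint/watchpoint registers at the triggering exception level, enable
 * single-step (via the user or kernel step machinery as appropriate) and
 * record that state in debug_info or stepping_kernel_bp. Once execution has
 * moved past the triggering instruction, reinstall_suspended_bps() is called
 * from the single-step handler to re-enable the registers.
 */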

/*
 * Debug exception handlers.
 */
static int breakpoint_handler(unsigned long unused, unsigned int esr,
                              struct pt_regs *regs)
{
        int i, step = 0, *kernel_step;
        u32 ctrl_reg;
        u64 addr, val;
        struct perf_event *bp, **slots;
        struct debug_info *debug_info;
        struct arch_hw_breakpoint_ctrl ctrl;

        slots = this_cpu_ptr(bp_on_reg);
        addr = instruction_pointer(regs);
        debug_info = &current->thread.debug;

        for (i = 0; i < core_num_brps; ++i) {
                rcu_read_lock();

                bp = slots[i];

                if (bp == NULL)
                        goto unlock;

                /* Check if the breakpoint value matches. */
                val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
                if (val != (addr & ~0x3))
                        goto unlock;

                /* Possible match, check the byte address select to confirm. */
                ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
                decode_ctrl_reg(ctrl_reg, &ctrl);
                if (!((1 << (addr & 0x3)) & ctrl.len))
                        goto unlock;

                counter_arch_bp(bp)->trigger = addr;
                perf_bp_event(bp, regs);

                /* Do we need to handle the stepping? */
                if (!bp->overflow_handler)
                        step = 1;
unlock:
                rcu_read_unlock();
        }

        if (!step)
                return 0;

        if (user_mode(regs)) {
                debug_info->bps_disabled = 1;
                toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);

                /* If we're already stepping a watchpoint, just return. */
                if (debug_info->wps_disabled)
                        return 0;

                if (test_thread_flag(TIF_SINGLESTEP))
                        debug_info->suspended_step = 1;
                else
                        user_enable_single_step(current);
        } else {
                toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
                kernel_step = this_cpu_ptr(&stepping_kernel_bp);

                if (*kernel_step != ARM_KERNEL_STEP_NONE)
                        return 0;

                if (kernel_active_single_step()) {
                        *kernel_step = ARM_KERNEL_STEP_SUSPEND;
                } else {
                        *kernel_step = ARM_KERNEL_STEP_ACTIVE;
                        kernel_enable_single_step(regs);
                }
        }

        return 0;
}

static int watchpoint_handler(unsigned long addr, unsigned int esr,
                              struct pt_regs *regs)
{
        int i, step = 0, *kernel_step, access;
        u32 ctrl_reg;
        u64 val, alignment_mask;
        struct perf_event *wp, **slots;
        struct debug_info *debug_info;
        struct arch_hw_breakpoint *info;
        struct arch_hw_breakpoint_ctrl ctrl;

        slots = this_cpu_ptr(wp_on_reg);
        debug_info = &current->thread.debug;

        for (i = 0; i < core_num_wrps; ++i) {
                rcu_read_lock();

                wp = slots[i];

                if (wp == NULL)
                        goto unlock;

                info = counter_arch_bp(wp);
                /* AArch32 watchpoints are either 4 or 8 bytes aligned. */
                if (is_compat_task()) {
                        if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
                                alignment_mask = 0x7;
                        else
                                alignment_mask = 0x3;
                } else {
                        alignment_mask = 0x7;
                }

                /* Check if the watchpoint value matches. */
                val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
                if (val != (addr & ~alignment_mask))
                        goto unlock;

                /* Possible match, check the byte address select to confirm. */
                ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
                decode_ctrl_reg(ctrl_reg, &ctrl);
                if (!((1 << (addr & alignment_mask)) & ctrl.len))
                        goto unlock;

                /*
                 * Check that the access type matches.
                 * 0 => load, otherwise => store
                 */
                access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
                         HW_BREAKPOINT_R;
                if (!(access & hw_breakpoint_type(wp)))
                        goto unlock;

                info->trigger = addr;
                perf_bp_event(wp, regs);

                /* Do we need to handle the stepping? */
                if (!wp->overflow_handler)
                        step = 1;

unlock:
                rcu_read_unlock();
        }

        if (!step)
                return 0;

        /*
         * We always disable EL0 watchpoints because the kernel can
         * cause these to fire via an unprivileged access.
         */
        toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);

        if (user_mode(regs)) {
                debug_info->wps_disabled = 1;

                /* If we're already stepping a breakpoint, just return. */
                if (debug_info->bps_disabled)
                        return 0;

                if (test_thread_flag(TIF_SINGLESTEP))
                        debug_info->suspended_step = 1;
                else
                        user_enable_single_step(current);
        } else {
                toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
                kernel_step = this_cpu_ptr(&stepping_kernel_bp);

                if (*kernel_step != ARM_KERNEL_STEP_NONE)
                        return 0;

                if (kernel_active_single_step()) {
                        *kernel_step = ARM_KERNEL_STEP_SUSPEND;
                } else {
                        *kernel_step = ARM_KERNEL_STEP_ACTIVE;
                        kernel_enable_single_step(regs);
                }
        }

        return 0;
}

/*
 * Handle single-step exception.
 */
int reinstall_suspended_bps(struct pt_regs *regs)
{
        struct debug_info *debug_info = &current->thread.debug;
        int handled_exception = 0, *kernel_step;

        kernel_step = this_cpu_ptr(&stepping_kernel_bp);

        /*
         * Called from single-step exception handler.
         * Return 0 if execution can resume, 1 if a SIGTRAP should be
         * reported.
         */
        if (user_mode(regs)) {
                if (debug_info->bps_disabled) {
                        debug_info->bps_disabled = 0;
                        toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
                        handled_exception = 1;
                }

                if (debug_info->wps_disabled) {
                        debug_info->wps_disabled = 0;
                        toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
                        handled_exception = 1;
                }

                if (handled_exception) {
                        if (debug_info->suspended_step) {
                                debug_info->suspended_step = 0;
                                /* Allow exception handling to fall-through. */
                                handled_exception = 0;
                        } else {
                                user_disable_single_step(current);
                        }
                }
        } else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
                toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
                toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);

                if (!debug_info->wps_disabled)
                        toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);

                if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
                        kernel_disable_single_step();
                        handled_exception = 1;
                } else {
                        handled_exception = 0;
                }

                *kernel_step = ARM_KERNEL_STEP_NONE;
        }

        return !handled_exception;
}

/*
 * Context-switcher for restoring suspended breakpoints.
 */
void hw_breakpoint_thread_switch(struct task_struct *next)
{
        /*
         *           current        next
         * disabled: 0              0     => The usual case, NOTIFY_DONE
         *           0              1     => Disable the registers
         *           1              0     => Enable the registers
         *           1              1     => NOTIFY_DONE. per-task bps will
         *                                   get taken care of by perf.
         */

        struct debug_info *current_debug_info, *next_debug_info;

        current_debug_info = &current->thread.debug;
        next_debug_info = &next->thread.debug;

        /* Update breakpoints. */
        if (current_debug_info->bps_disabled != next_debug_info->bps_disabled)
                toggle_bp_registers(AARCH64_DBG_REG_BCR,
                                    DBG_ACTIVE_EL0,
                                    !next_debug_info->bps_disabled);

        /* Update watchpoints. */
        if (current_debug_info->wps_disabled != next_debug_info->wps_disabled)
                toggle_bp_registers(AARCH64_DBG_REG_WCR,
                                    DBG_ACTIVE_EL0,
                                    !next_debug_info->wps_disabled);
}

/*
 * CPU initialisation.
 */
static void hw_breakpoint_reset(void *unused)
{
        int i;
        struct perf_event **slots;
        /*
         * When a CPU goes through cold-boot, it does not have any installed
         * slot, so it is safe to share the same function for restoring and
         * resetting breakpoints; when a CPU is hotplugged in, it goes
         * through the slots, which are all empty, hence it just resets control
         * and value for debug registers.
         * When this function is triggered on warm-boot through a CPU PM
         * notifier some slots might be initialized; if so they are
         * reprogrammed according to the debug slots content.
         */
        for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
                if (slots[i]) {
                        hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
                } else {
                        write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
                        write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
                }
        }

        for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
                if (slots[i]) {
                        hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
                } else {
                        write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
                        write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
                }
        }
}

static int hw_breakpoint_reset_notify(struct notifier_block *self,
                                      unsigned long action,
                                      void *hcpu)
{
        int cpu = (long)hcpu;
        if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
                smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
        return NOTIFY_OK;
}

static struct notifier_block hw_breakpoint_reset_nb = {
        .notifier_call = hw_breakpoint_reset_notify,
};

#ifdef CONFIG_CPU_PM
extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
#else
static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
{
}
#endif

/*
 * One-time initialisation.
 */
static int __init arch_hw_breakpoint_init(void)
{
        core_num_brps = get_num_brps();
        core_num_wrps = get_num_wrps();

        pr_info("found %d breakpoint and %d watchpoint registers.\n",
                core_num_brps, core_num_wrps);

        cpu_notifier_register_begin();

        /*
         * Reset the breakpoint resources. We assume that a halting
         * debugger will leave the world in a nice state for us.
         */
        smp_call_function(hw_breakpoint_reset, NULL, 1);
        hw_breakpoint_reset(NULL);

        /* Register debug fault handlers. */
        hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
                              TRAP_HWBKPT, "hw-breakpoint handler");
        hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
                              TRAP_HWBKPT, "hw-watchpoint handler");

        /* Register hotplug notifier. */
        __register_cpu_notifier(&hw_breakpoint_reset_nb);

        cpu_notifier_register_done();

        /* Register cpu_suspend hw breakpoint restore hook */
        cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);

        return 0;
}
arch_initcall(arch_hw_breakpoint_init);

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                    unsigned long val, void *data)
{
        return NOTIFY_DONE;
}