1 /* Copyright (C) 2009-2022 Free Software Foundation, Inc.
2 Contributed by ARM Ltd.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19 #include "gdbsupport/common-defs.h"
20 #include "gdbsupport/break-common.h"
21 #include "gdbsupport/common-regcache.h"
22 #include "nat/linux-nat.h"
23 #include "aarch64-linux-hw-point.h"
27 /* The order in which <sys/ptrace.h> and <asm/ptrace.h> are included
28 can be important. <sys/ptrace.h> often declares various PTRACE_*
29 enums. <asm/ptrace.h> often defines preprocessor constants for
30 these very same symbols. When that's the case, build errors will
31 result when <asm/ptrace.h> is included before <sys/ptrace.h>. */
32 #include <sys/ptrace.h>
33 #include <asm/ptrace.h>
/* Number of hardware breakpoints/watchpoints the target supports.
   They are initialized with values obtained via the ptrace calls
   with NT_ARM_HW_BREAK and NT_ARM_HW_WATCH respectively.  */

/* Slot count of the breakpoint register pairs (DBGBVR/DBGBCR).  */
int aarch64_num_bp_regs;
/* Slot count of the watchpoint register pairs (DBGWVR/DBGWCR).  */
int aarch64_num_wp_regs;
/* True if this kernel does not have the bug described by PR
   external/20207 (Linux >= 4.10).  A fixed kernel supports any
   contiguous range of bits in 8-bit byte DR_CONTROL_MASK.  A buggy
   kernel supports only 0x01, 0x03, 0x0f and 0xff.  We start by
   assuming the bug is fixed, and then detect the bug at
   PTRACE_SETREGSET time.  */
static bool kernel_supports_any_contiguous_range = true;
52 /* Return starting byte 0..7 incl. of a watchpoint encoded by CTRL. */
55 aarch64_watchpoint_offset (unsigned int ctrl
)
57 uint8_t mask
= DR_CONTROL_MASK (ctrl
);
60 /* Shift out bottom zeros. */
61 for (retval
= 0; mask
&& (mask
& 1) == 0; ++retval
)
67 /* Utility function that returns the length in bytes of a watchpoint
68 according to the content of a hardware debug control register CTRL.
69 Any contiguous range of bytes in CTRL is supported. The returned
70 value can be between 0..8 (inclusive). */
73 aarch64_watchpoint_length (unsigned int ctrl
)
75 uint8_t mask
= DR_CONTROL_MASK (ctrl
);
78 /* Shift out bottom zeros. */
79 mask
>>= aarch64_watchpoint_offset (ctrl
);
81 /* Count bottom ones. */
82 for (retval
= 0; (mask
& 1) != 0; ++retval
)
86 error (_("Unexpected hardware watchpoint length register value 0x%x"),
87 DR_CONTROL_MASK (ctrl
));
92 /* Given the hardware breakpoint or watchpoint type TYPE and its
93 length LEN, return the expected encoding for a hardware
94 breakpoint/watchpoint control register. */
97 aarch64_point_encode_ctrl_reg (enum target_hw_bp_type type
, int offset
, int len
)
99 unsigned int ctrl
, ttype
;
101 gdb_assert (offset
== 0 || kernel_supports_any_contiguous_range
);
102 gdb_assert (offset
+ len
<= AARCH64_HWP_MAX_LEN_PER_REG
);
120 perror_with_name (_("Unrecognized breakpoint/watchpoint type"));
125 /* offset and length bitmask */
126 ctrl
|= ((1 << len
) - 1) << (5 + offset
);
128 ctrl
|= (2 << 1) | 1;
133 /* Addresses to be written to the hardware breakpoint and watchpoint
134 value registers need to be aligned; the alignment is 4-byte and
135 8-type respectively. Linux kernel rejects any non-aligned address
136 it receives from the related ptrace call. Furthermore, the kernel
137 currently only supports the following Byte Address Select (BAS)
138 values: 0x1, 0x3, 0xf and 0xff, which means that for a hardware
139 watchpoint to be accepted by the kernel (via ptrace call), its
140 valid length can only be 1 byte, 2 bytes, 4 bytes or 8 bytes.
141 Despite these limitations, the unaligned watchpoint is supported in
144 Return 0 for any non-compliant ADDR and/or LEN; return 1 otherwise. */
147 aarch64_point_is_aligned (int is_watchpoint
, CORE_ADDR addr
, int len
)
149 unsigned int alignment
= 0;
152 alignment
= AARCH64_HWP_ALIGNMENT
;
155 struct regcache
*regcache
156 = get_thread_regcache_for_ptid (current_lwp_ptid ());
158 /* Set alignment to 2 only if the current process is 32-bit,
159 since thumb instruction can be 2-byte aligned. Otherwise, set
160 alignment to AARCH64_HBP_ALIGNMENT. */
161 if (regcache_register_size (regcache
, 0) == 8)
162 alignment
= AARCH64_HBP_ALIGNMENT
;
167 if (addr
& (alignment
- 1))
170 if ((!kernel_supports_any_contiguous_range
171 && len
!= 8 && len
!= 4 && len
!= 2 && len
!= 1)
172 || (kernel_supports_any_contiguous_range
173 && (len
< 1 || len
> 8)))
179 /* Given the (potentially unaligned) watchpoint address in ADDR and
180 length in LEN, return the aligned address, offset from that base
181 address, and aligned length in *ALIGNED_ADDR_P, *ALIGNED_OFFSET_P
182 and *ALIGNED_LEN_P, respectively. The returned values will be
183 valid values to write to the hardware watchpoint value and control
186 The given watchpoint may get truncated if more than one hardware
187 register is needed to cover the watched region. *NEXT_ADDR_P
188 and *NEXT_LEN_P, if non-NULL, will return the address and length
189 of the remaining part of the watchpoint (which can be processed
190 by calling this routine again to generate another aligned address,
191 offset and length tuple.
193 Essentially, unaligned watchpoint is achieved by minimally
194 enlarging the watched area to meet the alignment requirement, and
195 if necessary, splitting the watchpoint over several hardware
196 watchpoint registers.
198 On kernels that predate the support for Byte Address Select (BAS)
199 in the hardware watchpoint control register, the offset from the
200 base address is always zero, and so in that case the trade-off is
201 that there will be false-positive hits for the read-type or the
202 access-type hardware watchpoints; for the write type, which is more
203 commonly used, there will be no such issues, as the higher-level
204 breakpoint management in gdb always examines the exact watched
205 region for any content change, and transparently resumes a thread
206 from a watchpoint trap if there is no change to the watched region.
208 Another limitation is that because the watched region is enlarged,
209 the watchpoint fault address discovered by
210 aarch64_stopped_data_address may be outside of the original watched
211 region, especially when the triggering instruction is accessing a
212 larger region. When the fault address is not within any known
213 range, watchpoints_triggered in gdb will get confused, as the
214 higher-level watchpoint management is only aware of original
215 watched regions, and will think that some unknown watchpoint has
216 been triggered. To prevent such a case,
217 aarch64_stopped_data_address implementations in gdb and gdbserver
218 try to match the trapped address with a watched region, and return
219 an address within the latter. */
222 aarch64_align_watchpoint (CORE_ADDR addr
, int len
, CORE_ADDR
*aligned_addr_p
,
223 int *aligned_offset_p
, int *aligned_len_p
,
224 CORE_ADDR
*next_addr_p
, int *next_len_p
,
225 CORE_ADDR
*next_addr_orig_p
)
228 unsigned int offset
, aligned_offset
;
229 CORE_ADDR aligned_addr
;
230 const unsigned int alignment
= AARCH64_HWP_ALIGNMENT
;
231 const unsigned int max_wp_len
= AARCH64_HWP_MAX_LEN_PER_REG
;
233 /* As assumed by the algorithm. */
234 gdb_assert (alignment
== max_wp_len
);
239 /* The address put into the hardware watchpoint value register must
241 offset
= addr
& (alignment
- 1);
242 aligned_addr
= addr
- offset
;
244 = kernel_supports_any_contiguous_range
? addr
& (alignment
- 1) : 0;
246 gdb_assert (offset
>= 0 && offset
< alignment
);
247 gdb_assert (aligned_addr
>= 0 && aligned_addr
<= addr
);
248 gdb_assert (offset
+ len
> 0);
250 if (offset
+ len
>= max_wp_len
)
252 /* Need more than one watchpoint register; truncate at the
253 alignment boundary. */
255 = max_wp_len
- (kernel_supports_any_contiguous_range
? offset
: 0);
256 len
-= (max_wp_len
- offset
);
257 addr
+= (max_wp_len
- offset
);
258 gdb_assert ((addr
& (alignment
- 1)) == 0);
262 /* Find the smallest valid length that is large enough to
263 accommodate this watchpoint. */
264 static const unsigned char
265 aligned_len_array
[AARCH64_HWP_MAX_LEN_PER_REG
] =
266 { 1, 2, 4, 4, 8, 8, 8, 8 };
268 aligned_len
= (kernel_supports_any_contiguous_range
269 ? len
: aligned_len_array
[offset
+ len
- 1]);
275 *aligned_addr_p
= aligned_addr
;
276 if (aligned_offset_p
)
277 *aligned_offset_p
= aligned_offset
;
279 *aligned_len_p
= aligned_len
;
284 if (next_addr_orig_p
)
285 *next_addr_orig_p
= align_down (*next_addr_orig_p
+ alignment
, alignment
);
288 /* Helper for aarch64_notify_debug_reg_change. Records the
289 information about the change of one hardware breakpoint/watchpoint
290 setting for the thread LWP.
291 N.B. The actual updating of hardware debug registers is not
292 carried out until the moment the thread is resumed. */
295 debug_reg_change_callback (struct lwp_info
*lwp
, int is_watchpoint
,
298 int tid
= ptid_of_lwp (lwp
).lwp ();
299 struct arch_lwp_info
*info
= lwp_arch_private_info (lwp
);
300 dr_changed_t
*dr_changed_ptr
;
301 dr_changed_t dr_changed
;
305 info
= XCNEW (struct arch_lwp_info
);
306 lwp_set_arch_private_info (lwp
, info
);
311 debug_printf ("debug_reg_change_callback: \n\tOn entry:\n");
312 debug_printf ("\ttid%d, dr_changed_bp=0x%s, "
313 "dr_changed_wp=0x%s\n", tid
,
314 phex (info
->dr_changed_bp
, 8),
315 phex (info
->dr_changed_wp
, 8));
318 dr_changed_ptr
= is_watchpoint
? &info
->dr_changed_wp
319 : &info
->dr_changed_bp
;
320 dr_changed
= *dr_changed_ptr
;
323 && (idx
<= (is_watchpoint
? aarch64_num_wp_regs
324 : aarch64_num_bp_regs
)));
326 /* The actual update is done later just before resuming the lwp,
327 we just mark that one register pair needs updating. */
328 DR_MARK_N_CHANGED (dr_changed
, idx
);
329 *dr_changed_ptr
= dr_changed
;
331 /* If the lwp isn't stopped, force it to momentarily pause, so
332 we can update its debug registers. */
333 if (!lwp_is_stopped (lwp
))
334 linux_stop_lwp (lwp
);
338 debug_printf ("\tOn exit:\n\ttid%d, dr_changed_bp=0x%s, "
339 "dr_changed_wp=0x%s\n", tid
,
340 phex (info
->dr_changed_bp
, 8),
341 phex (info
->dr_changed_wp
, 8));
347 /* Notify each thread that their IDXth breakpoint/watchpoint register
348 pair needs to be updated. The message will be recorded in each
349 thread's arch-specific data area, the actual updating will be done
350 when the thread is resumed. */
353 aarch64_notify_debug_reg_change (const struct aarch64_debug_reg_state
*state
,
354 int is_watchpoint
, unsigned int idx
)
356 ptid_t pid_ptid
= ptid_t (current_lwp_ptid ().pid ());
358 iterate_over_lwps (pid_ptid
, [=] (struct lwp_info
*info
)
360 return debug_reg_change_callback (info
,
366 /* Reconfigure STATE to be compatible with Linux kernels with the PR
367 external/20207 bug. This is called when
368 KERNEL_SUPPORTS_ANY_CONTIGUOUS_RANGE transitions to false. Note we
369 don't try to support combining watchpoints with matching (and thus
370 shared) masks, as it's too late when we get here. On buggy
371 kernels, GDB will try to first setup the perfect matching ranges,
372 which will run out of registers before this function can merge
373 them. It doesn't look like worth the effort to improve that, given
374 eventually buggy kernels will be phased out. */
377 aarch64_downgrade_regs (struct aarch64_debug_reg_state
*state
)
379 for (int i
= 0; i
< aarch64_num_wp_regs
; ++i
)
380 if ((state
->dr_ctrl_wp
[i
] & 1) != 0)
382 gdb_assert (state
->dr_ref_count_wp
[i
] != 0);
383 uint8_t mask_orig
= (state
->dr_ctrl_wp
[i
] >> 5) & 0xff;
384 gdb_assert (mask_orig
!= 0);
385 static const uint8_t old_valid
[] = { 0x01, 0x03, 0x0f, 0xff };
387 for (const uint8_t old_mask
: old_valid
)
388 if (mask_orig
<= old_mask
)
393 gdb_assert (mask
!= 0);
395 /* No update needed for this watchpoint? */
396 if (mask
== mask_orig
)
398 state
->dr_ctrl_wp
[i
] |= mask
<< 5;
400 = align_down (state
->dr_addr_wp
[i
], AARCH64_HWP_ALIGNMENT
);
402 /* Try to match duplicate entries. */
403 for (int j
= 0; j
< i
; ++j
)
404 if ((state
->dr_ctrl_wp
[j
] & 1) != 0
405 && state
->dr_addr_wp
[j
] == state
->dr_addr_wp
[i
]
406 && state
->dr_addr_orig_wp
[j
] == state
->dr_addr_orig_wp
[i
]
407 && state
->dr_ctrl_wp
[j
] == state
->dr_ctrl_wp
[i
])
409 state
->dr_ref_count_wp
[j
] += state
->dr_ref_count_wp
[i
];
410 state
->dr_ref_count_wp
[i
] = 0;
411 state
->dr_addr_wp
[i
] = 0;
412 state
->dr_addr_orig_wp
[i
] = 0;
413 state
->dr_ctrl_wp
[i
] &= ~1;
417 aarch64_notify_debug_reg_change (state
, 1 /* is_watchpoint */, i
);
421 /* Record the insertion of one breakpoint/watchpoint, as represented
422 by ADDR and CTRL, in the process' arch-specific data area *STATE. */
425 aarch64_dr_state_insert_one_point (struct aarch64_debug_reg_state
*state
,
426 enum target_hw_bp_type type
,
427 CORE_ADDR addr
, int offset
, int len
,
430 int i
, idx
, num_regs
, is_watchpoint
;
431 unsigned int ctrl
, *dr_ctrl_p
, *dr_ref_count
;
432 CORE_ADDR
*dr_addr_p
, *dr_addr_orig_p
;
434 /* Set up state pointers. */
435 is_watchpoint
= (type
!= hw_execute
);
436 gdb_assert (aarch64_point_is_aligned (is_watchpoint
, addr
, len
));
439 num_regs
= aarch64_num_wp_regs
;
440 dr_addr_p
= state
->dr_addr_wp
;
441 dr_addr_orig_p
= state
->dr_addr_orig_wp
;
442 dr_ctrl_p
= state
->dr_ctrl_wp
;
443 dr_ref_count
= state
->dr_ref_count_wp
;
447 num_regs
= aarch64_num_bp_regs
;
448 dr_addr_p
= state
->dr_addr_bp
;
449 dr_addr_orig_p
= nullptr;
450 dr_ctrl_p
= state
->dr_ctrl_bp
;
451 dr_ref_count
= state
->dr_ref_count_bp
;
454 ctrl
= aarch64_point_encode_ctrl_reg (type
, offset
, len
);
456 /* Find an existing or free register in our cache. */
458 for (i
= 0; i
< num_regs
; ++i
)
460 if ((dr_ctrl_p
[i
] & 1) == 0)
462 gdb_assert (dr_ref_count
[i
] == 0);
464 /* no break; continue hunting for an exising one. */
466 else if (dr_addr_p
[i
] == addr
467 && (dr_addr_orig_p
== nullptr || dr_addr_orig_p
[i
] == addr_orig
)
468 && dr_ctrl_p
[i
] == ctrl
)
470 gdb_assert (dr_ref_count
[i
] != 0);
480 /* Update our cache. */
481 if ((dr_ctrl_p
[idx
] & 1) == 0)
484 dr_addr_p
[idx
] = addr
;
485 if (dr_addr_orig_p
!= nullptr)
486 dr_addr_orig_p
[idx
] = addr_orig
;
487 dr_ctrl_p
[idx
] = ctrl
;
488 dr_ref_count
[idx
] = 1;
489 /* Notify the change. */
490 aarch64_notify_debug_reg_change (state
, is_watchpoint
, idx
);
501 /* Record the removal of one breakpoint/watchpoint, as represented by
502 ADDR and CTRL, in the process' arch-specific data area *STATE. */
505 aarch64_dr_state_remove_one_point (struct aarch64_debug_reg_state
*state
,
506 enum target_hw_bp_type type
,
507 CORE_ADDR addr
, int offset
, int len
,
510 int i
, num_regs
, is_watchpoint
;
511 unsigned int ctrl
, *dr_ctrl_p
, *dr_ref_count
;
512 CORE_ADDR
*dr_addr_p
, *dr_addr_orig_p
;
514 /* Set up state pointers. */
515 is_watchpoint
= (type
!= hw_execute
);
518 num_regs
= aarch64_num_wp_regs
;
519 dr_addr_p
= state
->dr_addr_wp
;
520 dr_addr_orig_p
= state
->dr_addr_orig_wp
;
521 dr_ctrl_p
= state
->dr_ctrl_wp
;
522 dr_ref_count
= state
->dr_ref_count_wp
;
526 num_regs
= aarch64_num_bp_regs
;
527 dr_addr_p
= state
->dr_addr_bp
;
528 dr_addr_orig_p
= nullptr;
529 dr_ctrl_p
= state
->dr_ctrl_bp
;
530 dr_ref_count
= state
->dr_ref_count_bp
;
533 ctrl
= aarch64_point_encode_ctrl_reg (type
, offset
, len
);
535 /* Find the entry that matches the ADDR and CTRL. */
536 for (i
= 0; i
< num_regs
; ++i
)
537 if (dr_addr_p
[i
] == addr
538 && (dr_addr_orig_p
== nullptr || dr_addr_orig_p
[i
] == addr_orig
)
539 && dr_ctrl_p
[i
] == ctrl
)
541 gdb_assert (dr_ref_count
[i
] != 0);
549 /* Clear our cache. */
550 if (--dr_ref_count
[i
] == 0)
552 /* Clear the enable bit. */
555 if (dr_addr_orig_p
!= nullptr)
556 dr_addr_orig_p
[i
] = 0;
558 /* Notify the change. */
559 aarch64_notify_debug_reg_change (state
, is_watchpoint
, i
);
566 aarch64_handle_breakpoint (enum target_hw_bp_type type
, CORE_ADDR addr
,
567 int len
, int is_insert
,
568 struct aarch64_debug_reg_state
*state
)
572 /* The hardware breakpoint on AArch64 should always be 4-byte
573 aligned, but on AArch32, it can be 2-byte aligned. Note that
574 we only check the alignment on inserting breakpoint because
575 aarch64_point_is_aligned needs the inferior_ptid inferior's
576 regcache to decide whether the inferior is 32-bit or 64-bit.
577 However when GDB follows the parent process and detach breakpoints
578 from child process, inferior_ptid is the child ptid, but the
579 child inferior doesn't exist in GDB's view yet. */
580 if (!aarch64_point_is_aligned (0 /* is_watchpoint */ , addr
, len
))
583 return aarch64_dr_state_insert_one_point (state
, type
, addr
, 0, len
, -1);
586 return aarch64_dr_state_remove_one_point (state
, type
, addr
, 0, len
, -1);
589 /* This is essentially the same as aarch64_handle_breakpoint, apart
590 from that it is an aligned watchpoint to be handled. */
593 aarch64_handle_aligned_watchpoint (enum target_hw_bp_type type
,
594 CORE_ADDR addr
, int len
, int is_insert
,
595 struct aarch64_debug_reg_state
*state
)
598 return aarch64_dr_state_insert_one_point (state
, type
, addr
, 0, len
, addr
);
600 return aarch64_dr_state_remove_one_point (state
, type
, addr
, 0, len
, addr
);
603 /* Insert/remove unaligned watchpoint by calling
604 aarch64_align_watchpoint repeatedly until the whole watched region,
605 as represented by ADDR and LEN, has been properly aligned and ready
606 to be written to one or more hardware watchpoint registers.
607 IS_INSERT indicates whether this is an insertion or a deletion.
608 Return 0 if succeed. */
611 aarch64_handle_unaligned_watchpoint (enum target_hw_bp_type type
,
612 CORE_ADDR addr
, int len
, int is_insert
,
613 struct aarch64_debug_reg_state
*state
)
615 CORE_ADDR addr_orig
= addr
;
619 CORE_ADDR aligned_addr
;
620 int aligned_offset
, aligned_len
, ret
;
621 CORE_ADDR addr_orig_next
= addr_orig
;
623 aarch64_align_watchpoint (addr
, len
, &aligned_addr
, &aligned_offset
,
624 &aligned_len
, &addr
, &len
, &addr_orig_next
);
627 ret
= aarch64_dr_state_insert_one_point (state
, type
, aligned_addr
,
629 aligned_len
, addr_orig
);
631 ret
= aarch64_dr_state_remove_one_point (state
, type
, aligned_addr
,
633 aligned_len
, addr_orig
);
636 debug_printf ("handle_unaligned_watchpoint: is_insert: %d\n"
638 "aligned_addr: %s, aligned_len: %d\n"
642 "next_addr: %s, next_len: %d\n"
644 "addr_orig_next: %s\n",
645 is_insert
, core_addr_to_string_nz (aligned_addr
),
646 aligned_len
, core_addr_to_string_nz (addr_orig
),
647 core_addr_to_string_nz (addr
), len
,
648 core_addr_to_string_nz (addr_orig_next
));
650 addr_orig
= addr_orig_next
;
660 aarch64_handle_watchpoint (enum target_hw_bp_type type
, CORE_ADDR addr
,
661 int len
, int is_insert
,
662 struct aarch64_debug_reg_state
*state
)
664 if (aarch64_point_is_aligned (1 /* is_watchpoint */ , addr
, len
))
665 return aarch64_handle_aligned_watchpoint (type
, addr
, len
, is_insert
,
668 return aarch64_handle_unaligned_watchpoint (type
, addr
, len
, is_insert
,
672 /* Call ptrace to set the thread TID's hardware breakpoint/watchpoint
673 registers with data from *STATE. */
676 aarch64_linux_set_debug_regs (struct aarch64_debug_reg_state
*state
,
677 int tid
, int watchpoint
)
681 struct user_hwdebug_state regs
;
682 const CORE_ADDR
*addr
;
683 const unsigned int *ctrl
;
685 memset (®s
, 0, sizeof (regs
));
686 iov
.iov_base
= ®s
;
687 count
= watchpoint
? aarch64_num_wp_regs
: aarch64_num_bp_regs
;
688 addr
= watchpoint
? state
->dr_addr_wp
: state
->dr_addr_bp
;
689 ctrl
= watchpoint
? state
->dr_ctrl_wp
: state
->dr_ctrl_bp
;
692 iov
.iov_len
= (offsetof (struct user_hwdebug_state
, dbg_regs
)
693 + count
* sizeof (regs
.dbg_regs
[0]));
695 for (i
= 0; i
< count
; i
++)
697 regs
.dbg_regs
[i
].addr
= addr
[i
];
698 regs
.dbg_regs
[i
].ctrl
= ctrl
[i
];
701 if (ptrace (PTRACE_SETREGSET
, tid
,
702 watchpoint
? NT_ARM_HW_WATCH
: NT_ARM_HW_BREAK
,
705 /* Handle Linux kernels with the PR external/20207 bug. */
706 if (watchpoint
&& errno
== EINVAL
707 && kernel_supports_any_contiguous_range
)
709 kernel_supports_any_contiguous_range
= false;
710 aarch64_downgrade_regs (state
);
711 aarch64_linux_set_debug_regs (state
, tid
, watchpoint
);
714 error (_("Unexpected error setting hardware debug registers"));
718 /* See nat/aarch64-linux-hw-point.h. */
721 aarch64_linux_any_set_debug_regs_state (aarch64_debug_reg_state
*state
,
724 int count
= watchpoint
? aarch64_num_wp_regs
: aarch64_num_bp_regs
;
728 const CORE_ADDR
*addr
= watchpoint
? state
->dr_addr_wp
: state
->dr_addr_bp
;
729 const unsigned int *ctrl
= watchpoint
? state
->dr_ctrl_wp
: state
->dr_ctrl_bp
;
731 for (int i
= 0; i
< count
; i
++)
732 if (addr
[i
] != 0 || ctrl
[i
] != 0)
738 /* Print the values of the cached breakpoint/watchpoint registers. */
741 aarch64_show_debug_reg_state (struct aarch64_debug_reg_state
*state
,
742 const char *func
, CORE_ADDR addr
,
743 int len
, enum target_hw_bp_type type
)
747 debug_printf ("%s", func
);
749 debug_printf (" (addr=0x%08lx, len=%d, type=%s)",
750 (unsigned long) addr
, len
,
751 type
== hw_write
? "hw-write-watchpoint"
752 : (type
== hw_read
? "hw-read-watchpoint"
753 : (type
== hw_access
? "hw-access-watchpoint"
754 : (type
== hw_execute
? "hw-breakpoint"
756 debug_printf (":\n");
758 debug_printf ("\tBREAKPOINTs:\n");
759 for (i
= 0; i
< aarch64_num_bp_regs
; i
++)
760 debug_printf ("\tBP%d: addr=%s, ctrl=0x%08x, ref.count=%d\n",
761 i
, core_addr_to_string_nz (state
->dr_addr_bp
[i
]),
762 state
->dr_ctrl_bp
[i
], state
->dr_ref_count_bp
[i
]);
764 debug_printf ("\tWATCHPOINTs:\n");
765 for (i
= 0; i
< aarch64_num_wp_regs
; i
++)
766 debug_printf ("\tWP%d: addr=%s (orig=%s), ctrl=0x%08x, ref.count=%d\n",
767 i
, core_addr_to_string_nz (state
->dr_addr_wp
[i
]),
768 core_addr_to_string_nz (state
->dr_addr_orig_wp
[i
]),
769 state
->dr_ctrl_wp
[i
], state
->dr_ref_count_wp
[i
]);
772 /* Return true if debug arch level is compatible for hw watchpoints
776 compatible_debug_arch (unsigned int debug_arch
)
778 if (debug_arch
== AARCH64_DEBUG_ARCH_V8
)
780 if (debug_arch
== AARCH64_DEBUG_ARCH_V8_1
)
782 if (debug_arch
== AARCH64_DEBUG_ARCH_V8_2
)
784 if (debug_arch
== AARCH64_DEBUG_ARCH_V8_4
)
790 /* Get the hardware debug register capacity information from the
791 process represented by TID. */
794 aarch64_linux_get_debug_reg_capacity (int tid
)
797 struct user_hwdebug_state dreg_state
;
799 iov
.iov_base
= &dreg_state
;
800 iov
.iov_len
= sizeof (dreg_state
);
802 /* Get hardware watchpoint register info. */
803 if (ptrace (PTRACE_GETREGSET
, tid
, NT_ARM_HW_WATCH
, &iov
) == 0
804 && compatible_debug_arch (AARCH64_DEBUG_ARCH (dreg_state
.dbg_info
)))
806 aarch64_num_wp_regs
= AARCH64_DEBUG_NUM_SLOTS (dreg_state
.dbg_info
);
807 if (aarch64_num_wp_regs
> AARCH64_HWP_MAX_NUM
)
809 warning (_("Unexpected number of hardware watchpoint registers"
810 " reported by ptrace, got %d, expected %d."),
811 aarch64_num_wp_regs
, AARCH64_HWP_MAX_NUM
);
812 aarch64_num_wp_regs
= AARCH64_HWP_MAX_NUM
;
817 warning (_("Unable to determine the number of hardware watchpoints"
819 aarch64_num_wp_regs
= 0;
822 /* Get hardware breakpoint register info. */
823 if (ptrace (PTRACE_GETREGSET
, tid
, NT_ARM_HW_BREAK
, &iov
) == 0
824 && compatible_debug_arch (AARCH64_DEBUG_ARCH (dreg_state
.dbg_info
)))
826 aarch64_num_bp_regs
= AARCH64_DEBUG_NUM_SLOTS (dreg_state
.dbg_info
);
827 if (aarch64_num_bp_regs
> AARCH64_HBP_MAX_NUM
)
829 warning (_("Unexpected number of hardware breakpoint registers"
830 " reported by ptrace, got %d, expected %d."),
831 aarch64_num_bp_regs
, AARCH64_HBP_MAX_NUM
);
832 aarch64_num_bp_regs
= AARCH64_HBP_MAX_NUM
;
837 warning (_("Unable to determine the number of hardware breakpoints"
839 aarch64_num_bp_regs
= 0;
843 /* Return true if we can watch a memory region that starts address
844 ADDR and whose length is LEN in bytes. */
847 aarch64_linux_region_ok_for_watchpoint (CORE_ADDR addr
, int len
)
849 CORE_ADDR aligned_addr
;
851 /* Can not set watchpoints for zero or negative lengths. */
855 /* Must have hardware watchpoint debug register(s). */
856 if (aarch64_num_wp_regs
== 0)
859 /* We support unaligned watchpoint address and arbitrary length,
860 as long as the size of the whole watched area after alignment
861 doesn't exceed size of the total area that all watchpoint debug
862 registers can watch cooperatively.
864 This is a very relaxed rule, but unfortunately there are
865 limitations, e.g. false-positive hits, due to limited support of
866 hardware debug registers in the kernel. See comment above
867 aarch64_align_watchpoint for more information. */
869 aligned_addr
= addr
& ~(AARCH64_HWP_MAX_LEN_PER_REG
- 1);
870 if (aligned_addr
+ aarch64_num_wp_regs
* AARCH64_HWP_MAX_LEN_PER_REG
874 /* All tests passed so we are likely to be able to set the watchpoint.
875 The reason that it is 'likely' rather than 'must' is because
876 we don't check the current usage of the watchpoint registers, and
877 there may not be enough registers available for this watchpoint.
878 Ideally we should check the cached debug register state, however
879 the checking is costly. */