/* Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "common/common-defs.h"
#include "common/break-common.h"
#include "common/common-regcache.h"
#include "nat/linux-nat.h"
#include "aarch64-linux-hw-point.h"

#include <asm/ptrace.h>
#include <sys/ptrace.h>
#include <sys/uio.h>	/* For struct iovec.  */
#include <elf.h>	/* For NT_ARM_HW_BREAK and NT_ARM_HW_WATCH.  */
/* Number of hardware breakpoints/watchpoints the target supports.
   They are initialized with values obtained via the ptrace calls
   with NT_ARM_HW_BREAK and NT_ARM_HW_WATCH respectively.  */

int aarch64_num_bp_regs;
int aarch64_num_wp_regs;
/* True if this kernel does not have the bug described by PR
   external/20207 (Linux >= 4.10).  A fixed kernel supports any
   contiguous range of bits in 8-bit byte DR_CONTROL_MASK.  A buggy
   kernel supports only 0x01, 0x03, 0x0f and 0xff.  We start by
   assuming the bug is fixed, and then detect the bug at
   PTRACE_SETREGSET time.  */
static bool kernel_supports_any_contiguous_range = true;
/* Return starting byte 0..7 incl. of a watchpoint encoded by CTRL.  */

unsigned int
aarch64_watchpoint_offset (unsigned int ctrl)
{
  uint8_t mask = DR_CONTROL_MASK (ctrl);
  unsigned retval;

  /* Shift out bottom zeros.  */
  for (retval = 0; mask && (mask & 1) == 0; ++retval)
    mask >>= 1;

  return retval;
}
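
/* For example, a control value whose DR_CONTROL_MASK (BAS) field is 0x0c
   (binary 00001100) selects bytes 2 and 3 of the watched doubleword, so
   the loop above shifts out two zero bits and returns 2.  */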
/* Utility function that returns the length in bytes of a watchpoint
   according to the content of a hardware debug control register CTRL.
   Any contiguous range of bytes in CTRL is supported.  The returned
   value can be between 0..8 (inclusive).  */

unsigned int
aarch64_watchpoint_length (unsigned int ctrl)
{
  uint8_t mask = DR_CONTROL_MASK (ctrl);
  unsigned retval;

  /* Shift out bottom zeros.  */
  mask >>= aarch64_watchpoint_offset (ctrl);

  /* Count bottom ones.  */
  for (retval = 0; (mask & 1) != 0; ++retval)
    mask >>= 1;

  if (mask != 0)
    error (_("Unexpected hardware watchpoint length register value 0x%x"),
	   DR_CONTROL_MASK (ctrl));

  return retval;
}
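
/* Continuing the example, a BAS field of 0x0c yields offset 2 and length 2,
   i.e. a 2-byte watchpoint on bytes 2-3 of the aligned doubleword; a
   non-contiguous mask such as 0x0a leaves a non-zero remainder after the
   run of ones and is rejected by the error above.  */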
/* Given the hardware breakpoint or watchpoint type TYPE and its
   length LEN, return the expected encoding for a hardware
   breakpoint/watchpoint control register.  */

static unsigned int
aarch64_point_encode_ctrl_reg (enum target_hw_bp_type type, int offset,
			       int len)
{
  unsigned int ctrl, ttype;

  gdb_assert (offset == 0 || kernel_supports_any_contiguous_range);
  gdb_assert (offset + len <= AARCH64_HWP_MAX_LEN_PER_REG);

  /* type */
  switch (type)
    {
    case hw_write:
      ttype = 2;
      break;
    case hw_read:
      ttype = 1;
      break;
    case hw_access:
      ttype = 3;
      break;
    case hw_execute:
      ttype = 0;
      break;
    default:
      perror_with_name (_("Unrecognized breakpoint/watchpoint type"));
    }

  ctrl = ttype << 3;

  /* offset and length bitmask */
  ctrl |= ((1 << len) - 1) << (5 + offset);
  /* enabled at el0 */
  ctrl |= (2 << 1) | 1;

  return ctrl;
}
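
/* For instance, a 4-byte write watchpoint at offset 0 encodes as
   load/store type 2 in bits [4:3], BAS 0x0f in bits [12:5], plus the
   enable bits (2 << 1) | 1, giving a control value of 0x1f5.  */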
/* Addresses to be written to the hardware breakpoint and watchpoint
   value registers need to be aligned; the alignment is 4-byte and
   8-byte respectively.  The Linux kernel rejects any non-aligned address
   it receives from the related ptrace call.  Furthermore, the kernel
   currently only supports the following Byte Address Select (BAS)
   values: 0x1, 0x3, 0xf and 0xff, which means that for a hardware
   watchpoint to be accepted by the kernel (via ptrace call), its
   valid length can only be 1 byte, 2 bytes, 4 bytes or 8 bytes.
   Despite these limitations, the unaligned watchpoint is supported in
   this port.

   Return 0 for any non-compliant ADDR and/or LEN; return 1 otherwise.  */

static int
aarch64_point_is_aligned (int is_watchpoint, CORE_ADDR addr, int len)
{
  unsigned int alignment = 0;

  if (is_watchpoint)
    alignment = AARCH64_HWP_ALIGNMENT;
  else
    {
      struct regcache *regcache
	= get_thread_regcache_for_ptid (current_lwp_ptid ());

      /* Set alignment to 2 only if the current process is 32-bit,
	 since Thumb instructions can be 2-byte aligned.  Otherwise, set
	 alignment to AARCH64_HBP_ALIGNMENT.  */
      if (regcache_register_size (regcache, 0) == 8)
	alignment = AARCH64_HBP_ALIGNMENT;
      else
	alignment = 2;
    }

  if (addr & (alignment - 1))
    return 0;

  if ((!kernel_supports_any_contiguous_range
       && len != 8 && len != 4 && len != 2 && len != 1)
      || (kernel_supports_any_contiguous_range
	  && (len < 1 || len > 8)))
    return 0;

  return 1;
}
/* Given the (potentially unaligned) watchpoint address in ADDR and
   length in LEN, return the aligned address, offset from that base
   address, and aligned length in *ALIGNED_ADDR_P, *ALIGNED_OFFSET_P
   and *ALIGNED_LEN_P, respectively.  The returned values will be
   valid values to write to the hardware watchpoint value and control
   registers.

   The given watchpoint may get truncated if more than one hardware
   register is needed to cover the watched region.  *NEXT_ADDR_P
   and *NEXT_LEN_P, if non-NULL, will return the address and length
   of the remaining part of the watchpoint (which can be processed
   by calling this routine again to generate another aligned address,
   offset and length tuple).

   Essentially, an unaligned watchpoint is achieved by minimally
   enlarging the watched area to meet the alignment requirement, and
   if necessary, splitting the watchpoint over several hardware
   watchpoint registers.

   On kernels that predate the support for Byte Address Select (BAS)
   in the hardware watchpoint control register, the offset from the
   base address is always zero, and so in that case the trade-off is
   that there will be false-positive hits for the read-type or the
   access-type hardware watchpoints; for the write type, which is more
   commonly used, there will be no such issues, as the higher-level
   breakpoint management in gdb always examines the exact watched
   region for any content change, and transparently resumes a thread
   from a watchpoint trap if there is no change to the watched region.

   Another limitation is that, because the watched region is enlarged,
   the watchpoint fault address discovered by
   aarch64_stopped_data_address may be outside of the original watched
   region, especially when the triggering instruction is accessing a
   larger region.  When the fault address is not within any known
   range, watchpoints_triggered in gdb will get confused, as the
   higher-level watchpoint management is only aware of the original
   watched regions, and will think that some unknown watchpoint has
   been triggered.  To prevent such a case, the
   aarch64_stopped_data_address implementations in gdb and gdbserver
   try to match the trapped address with a watched region, and return
   an address within the latter.  */
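
/* As an illustration: watching 7 bytes at 0x1003 on a fixed kernel first
   yields aligned_addr 0x1000, offset 3 and length 5 (covering
   0x1003-0x1007), with the remainder returned as next address 0x1008 and
   next length 2; a second call then yields aligned_addr 0x1008, offset 0
   and length 2.  */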
static void
aarch64_align_watchpoint (CORE_ADDR addr, int len, CORE_ADDR *aligned_addr_p,
			  int *aligned_offset_p, int *aligned_len_p,
			  CORE_ADDR *next_addr_p, int *next_len_p,
			  CORE_ADDR *next_addr_orig_p)
{
  int aligned_len;
  unsigned int offset, aligned_offset;
  CORE_ADDR aligned_addr;
  const unsigned int alignment = AARCH64_HWP_ALIGNMENT;
  const unsigned int max_wp_len = AARCH64_HWP_MAX_LEN_PER_REG;

  /* As assumed by the algorithm.  */
  gdb_assert (alignment == max_wp_len);

  if (len <= 0)
    return;

  /* The address put into the hardware watchpoint value register must
     be aligned.  */
  offset = addr & (alignment - 1);
  aligned_addr = addr - offset;
  aligned_offset
    = kernel_supports_any_contiguous_range ? addr & (alignment - 1) : 0;

  gdb_assert (offset >= 0 && offset < alignment);
  gdb_assert (aligned_addr >= 0 && aligned_addr <= addr);
  gdb_assert (offset + len > 0);

  if (offset + len >= max_wp_len)
    {
      /* Need more than one watchpoint register; truncate at the
	 alignment boundary.  */
      aligned_len
	= max_wp_len - (kernel_supports_any_contiguous_range ? offset : 0);
      len -= (max_wp_len - offset);
      addr += (max_wp_len - offset);
      gdb_assert ((addr & (alignment - 1)) == 0);
    }
  else
    {
      /* Find the smallest valid length that is large enough to
	 accommodate this watchpoint.  */
      static const unsigned char
	aligned_len_array[AARCH64_HWP_MAX_LEN_PER_REG] =
	{ 1, 2, 4, 4, 8, 8, 8, 8 };

      aligned_len = (kernel_supports_any_contiguous_range
		     ? len : aligned_len_array[offset + len - 1]);
      addr += len;
      len = 0;
    }

  if (aligned_addr_p)
    *aligned_addr_p = aligned_addr;
  if (aligned_offset_p)
    *aligned_offset_p = aligned_offset;
  if (aligned_len_p)
    *aligned_len_p = aligned_len;
  if (next_addr_p)
    *next_addr_p = addr;
  if (next_len_p)
    *next_len_p = len;
  if (next_addr_orig_p)
    *next_addr_orig_p
      = align_down (*next_addr_orig_p + alignment, alignment);
}
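
/* On kernels without BAS range support, aarch64_align_watchpoint widens
   instead of offsetting: for example, 2 bytes at 0x1003 become
   aligned_addr 0x1000, offset 0 and aligned_len aligned_len_array[4] = 8,
   so the whole doubleword 0x1000-0x1007 is watched and false positives on
   the extra bytes are possible.  */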
/* Helper for aarch64_notify_debug_reg_change.  Records the
   information about the change of one hardware breakpoint/watchpoint
   setting for the thread LWP.
   N.B.  The actual updating of hardware debug registers is not
   carried out until the moment the thread is resumed.  */

static int
debug_reg_change_callback (struct lwp_info *lwp, int is_watchpoint,
			   unsigned int idx)
{
  int tid = ptid_of_lwp (lwp).lwp ();
  struct arch_lwp_info *info = lwp_arch_private_info (lwp);
  dr_changed_t *dr_changed_ptr;
  dr_changed_t dr_changed;

  if (info == NULL)
    {
      info = XCNEW (struct arch_lwp_info);
      lwp_set_arch_private_info (lwp, info);
    }

  if (show_debug_regs)
    {
      debug_printf ("debug_reg_change_callback: \n\tOn entry:\n");
      debug_printf ("\ttid%d, dr_changed_bp=0x%s, "
		    "dr_changed_wp=0x%s\n", tid,
		    phex (info->dr_changed_bp, 8),
		    phex (info->dr_changed_wp, 8));
    }

  dr_changed_ptr = is_watchpoint ? &info->dr_changed_wp
    : &info->dr_changed_bp;
  dr_changed = *dr_changed_ptr;

  gdb_assert (idx >= 0
	      && (idx <= (is_watchpoint ? aarch64_num_wp_regs
			  : aarch64_num_bp_regs)));

  /* The actual update is done later just before resuming the lwp,
     we just mark that one register pair needs updating.  */
  DR_MARK_N_CHANGED (dr_changed, idx);
  *dr_changed_ptr = dr_changed;

  /* If the lwp isn't stopped, force it to momentarily pause, so
     we can update its debug registers.  */
  if (!lwp_is_stopped (lwp))
    linux_stop_lwp (lwp);

  if (show_debug_regs)
    {
      debug_printf ("\tOn exit:\n\ttid%d, dr_changed_bp=0x%s, "
		    "dr_changed_wp=0x%s\n", tid,
		    phex (info->dr_changed_bp, 8),
		    phex (info->dr_changed_wp, 8));
    }

  return 0;
}
/* Notify each thread that their IDXth breakpoint/watchpoint register
   pair needs to be updated.  The message will be recorded in each
   thread's arch-specific data area; the actual updating will be done
   when the thread is resumed.  */

static void
aarch64_notify_debug_reg_change (const struct aarch64_debug_reg_state *state,
				 int is_watchpoint, unsigned int idx)
{
  ptid_t pid_ptid = ptid_t (current_lwp_ptid ().pid ());

  iterate_over_lwps (pid_ptid, [=] (struct lwp_info *info)
			       {
				 return debug_reg_change_callback (info,
								   is_watchpoint,
								   idx);
			       });
}
/* Reconfigure STATE to be compatible with Linux kernels with the PR
   external/20207 bug.  This is called when
   KERNEL_SUPPORTS_ANY_CONTIGUOUS_RANGE transitions to false.  Note we
   don't try to support combining watchpoints with matching (and thus
   shared) masks, as it's too late when we get here.  On buggy
   kernels, GDB will try to first set up the perfect matching ranges,
   which will run out of registers before this function can merge
   them.  It doesn't look worth the effort to improve that, given that
   eventually buggy kernels will be phased out.  */
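
/* For instance, a watchpoint programmed with BAS mask 0x06 (bytes 1-2) is
   widened to the next accepted mask, 0x0f (bytes 0-3), possibly introducing
   false positives on the extra bytes it now covers.  */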
static void
aarch64_downgrade_regs (struct aarch64_debug_reg_state *state)
{
  for (int i = 0; i < aarch64_num_wp_regs; ++i)
    if ((state->dr_ctrl_wp[i] & 1) != 0)
      {
	gdb_assert (state->dr_ref_count_wp[i] != 0);
	uint8_t mask_orig = (state->dr_ctrl_wp[i] >> 5) & 0xff;
	gdb_assert (mask_orig != 0);
	static const uint8_t old_valid[] = { 0x01, 0x03, 0x0f, 0xff };
	uint8_t mask = 0;

	for (const uint8_t old_mask : old_valid)
	  if (mask_orig <= old_mask)
	    {
	      mask = old_mask;
	      break;
	    }

	gdb_assert (mask != 0);

	/* No update needed for this watchpoint?  */
	if (mask == mask_orig)
	  continue;

	state->dr_ctrl_wp[i] |= mask << 5;
	state->dr_addr_wp[i]
	  = align_down (state->dr_addr_wp[i], AARCH64_HWP_ALIGNMENT);

	/* Try to match duplicate entries.  */
	for (int j = 0; j < i; ++j)
	  if ((state->dr_ctrl_wp[j] & 1) != 0
	      && state->dr_addr_wp[j] == state->dr_addr_wp[i]
	      && state->dr_addr_orig_wp[j] == state->dr_addr_orig_wp[i]
	      && state->dr_ctrl_wp[j] == state->dr_ctrl_wp[i])
	    {
	      state->dr_ref_count_wp[j] += state->dr_ref_count_wp[i];
	      state->dr_ref_count_wp[i] = 0;
	      state->dr_addr_wp[i] = 0;
	      state->dr_addr_orig_wp[i] = 0;
	      state->dr_ctrl_wp[i] &= ~1;
	      break;
	    }

	aarch64_notify_debug_reg_change (state, 1 /* is_watchpoint */, i);
      }
}
/* Record the insertion of one breakpoint/watchpoint, as represented
   by ADDR and CTRL, in the process' arch-specific data area *STATE.  */

static int
aarch64_dr_state_insert_one_point (struct aarch64_debug_reg_state *state,
				   enum target_hw_bp_type type,
				   CORE_ADDR addr, int offset, int len,
				   CORE_ADDR addr_orig)
{
  int i, idx, num_regs, is_watchpoint;
  unsigned int ctrl, *dr_ctrl_p, *dr_ref_count;
  CORE_ADDR *dr_addr_p, *dr_addr_orig_p;

  /* Set up state pointers.  */
  is_watchpoint = (type != hw_execute);
  gdb_assert (aarch64_point_is_aligned (is_watchpoint, addr, len));
  if (is_watchpoint)
    {
      num_regs = aarch64_num_wp_regs;
      dr_addr_p = state->dr_addr_wp;
      dr_addr_orig_p = state->dr_addr_orig_wp;
      dr_ctrl_p = state->dr_ctrl_wp;
      dr_ref_count = state->dr_ref_count_wp;
    }
  else
    {
      num_regs = aarch64_num_bp_regs;
      dr_addr_p = state->dr_addr_bp;
      dr_addr_orig_p = nullptr;
      dr_ctrl_p = state->dr_ctrl_bp;
      dr_ref_count = state->dr_ref_count_bp;
    }

  ctrl = aarch64_point_encode_ctrl_reg (type, offset, len);

  /* Find an existing or free register in our cache.  */
  idx = -1;
  for (i = 0; i < num_regs; ++i)
    {
      if ((dr_ctrl_p[i] & 1) == 0)
	{
	  gdb_assert (dr_ref_count[i] == 0);
	  idx = i;
	  /* no break; continue hunting for an existing one.  */
	}
      else if (dr_addr_p[i] == addr
	       && (dr_addr_orig_p == nullptr || dr_addr_orig_p[i] == addr_orig)
	       && dr_ctrl_p[i] == ctrl)
	{
	  gdb_assert (dr_ref_count[i] != 0);
	  idx = i;
	  break;
	}
    }

  /* No space.  */
  if (idx == -1)
    return -1;

  /* Update our cache.  */
  if ((dr_ctrl_p[idx] & 1) == 0)
    {
      /* New entry.  */
      dr_addr_p[idx] = addr;
      if (dr_addr_orig_p != nullptr)
	dr_addr_orig_p[idx] = addr_orig;
      dr_ctrl_p[idx] = ctrl;
      dr_ref_count[idx] = 1;
      /* Notify the change.  */
      aarch64_notify_debug_reg_change (state, is_watchpoint, idx);
    }
  else
    {
      /* Existing entry; just increment the reference count.  */
      dr_ref_count[idx]++;
    }

  return 0;
}
/* Record the removal of one breakpoint/watchpoint, as represented by
   ADDR and CTRL, in the process' arch-specific data area *STATE.  */

static int
aarch64_dr_state_remove_one_point (struct aarch64_debug_reg_state *state,
				   enum target_hw_bp_type type,
				   CORE_ADDR addr, int offset, int len,
				   CORE_ADDR addr_orig)
{
  int i, num_regs, is_watchpoint;
  unsigned int ctrl, *dr_ctrl_p, *dr_ref_count;
  CORE_ADDR *dr_addr_p, *dr_addr_orig_p;

  /* Set up state pointers.  */
  is_watchpoint = (type != hw_execute);
  if (is_watchpoint)
    {
      num_regs = aarch64_num_wp_regs;
      dr_addr_p = state->dr_addr_wp;
      dr_addr_orig_p = state->dr_addr_orig_wp;
      dr_ctrl_p = state->dr_ctrl_wp;
      dr_ref_count = state->dr_ref_count_wp;
    }
  else
    {
      num_regs = aarch64_num_bp_regs;
      dr_addr_p = state->dr_addr_bp;
      dr_addr_orig_p = nullptr;
      dr_ctrl_p = state->dr_ctrl_bp;
      dr_ref_count = state->dr_ref_count_bp;
    }

  ctrl = aarch64_point_encode_ctrl_reg (type, offset, len);

  /* Find the entry that matches the ADDR and CTRL.  */
  for (i = 0; i < num_regs; ++i)
    if (dr_addr_p[i] == addr
	&& (dr_addr_orig_p == nullptr || dr_addr_orig_p[i] == addr_orig)
	&& dr_ctrl_p[i] == ctrl)
      {
	gdb_assert (dr_ref_count[i] != 0);
	break;
      }

  /* Not found.  */
  if (i == num_regs)
    return -1;

  /* Clear our cache.  */
  if (--dr_ref_count[i] == 0)
    {
      /* Clear the enable bit.  */
      ctrl &= ~1;
      dr_addr_p[i] = 0;
      if (dr_addr_orig_p != nullptr)
	dr_addr_orig_p[i] = 0;
      dr_ctrl_p[i] = ctrl;
      /* Notify the change.  */
      aarch64_notify_debug_reg_change (state, is_watchpoint, i);
    }

  return 0;
}
int
aarch64_handle_breakpoint (enum target_hw_bp_type type, CORE_ADDR addr,
			   int len, int is_insert,
			   struct aarch64_debug_reg_state *state)
{
  if (is_insert)
    {
      /* The hardware breakpoint on AArch64 should always be 4-byte
	 aligned, but on AArch32, it can be 2-byte aligned.  Note that
	 we only check the alignment when inserting a breakpoint because
	 aarch64_point_is_aligned needs the inferior_ptid inferior's
	 regcache to decide whether the inferior is 32-bit or 64-bit.
	 However, when GDB follows the parent process and detaches
	 breakpoints from the child process, inferior_ptid is the child
	 ptid, but the child inferior doesn't exist in GDB's view yet.  */
      if (!aarch64_point_is_aligned (0 /* is_watchpoint */ , addr, len))
	return -1;

      return aarch64_dr_state_insert_one_point (state, type, addr, 0, len, -1);
    }
  else
    return aarch64_dr_state_remove_one_point (state, type, addr, 0, len, -1);
}
/* This is essentially the same as aarch64_handle_breakpoint, except
   that it handles an aligned watchpoint.  */

static int
aarch64_handle_aligned_watchpoint (enum target_hw_bp_type type,
				   CORE_ADDR addr, int len, int is_insert,
				   struct aarch64_debug_reg_state *state)
{
  if (is_insert)
    return aarch64_dr_state_insert_one_point (state, type, addr, 0, len, addr);
  else
    return aarch64_dr_state_remove_one_point (state, type, addr, 0, len, addr);
}
/* Insert/remove an unaligned watchpoint by calling
   aarch64_align_watchpoint repeatedly until the whole watched region,
   as represented by ADDR and LEN, has been properly aligned and is
   ready to be written to one or more hardware watchpoint registers.
   IS_INSERT indicates whether this is an insertion or a deletion.
   Return 0 on success.  */

static int
aarch64_handle_unaligned_watchpoint (enum target_hw_bp_type type,
				     CORE_ADDR addr, int len, int is_insert,
				     struct aarch64_debug_reg_state *state)
{
  CORE_ADDR addr_orig = addr;

  while (len > 0)
    {
      CORE_ADDR aligned_addr;
      int aligned_offset, aligned_len, ret;
      CORE_ADDR addr_orig_next = addr_orig;

      aarch64_align_watchpoint (addr, len, &aligned_addr, &aligned_offset,
				&aligned_len, &addr, &len, &addr_orig_next);

      if (is_insert)
	ret = aarch64_dr_state_insert_one_point (state, type, aligned_addr,
						 aligned_offset,
						 aligned_len, addr_orig);
      else
	ret = aarch64_dr_state_remove_one_point (state, type, aligned_addr,
						 aligned_offset,
						 aligned_len, addr_orig);

      if (show_debug_regs)
	debug_printf ("handle_unaligned_watchpoint: is_insert: %d\n"
		      "aligned_addr: %s, aligned_len: %d\n"
		      "addr_orig: %s\n"
		      "next_addr: %s, next_len: %d\n"
		      "addr_orig_next: %s\n",
		      is_insert, core_addr_to_string_nz (aligned_addr),
		      aligned_len, core_addr_to_string_nz (addr_orig),
		      core_addr_to_string_nz (addr), len,
		      core_addr_to_string_nz (addr_orig_next));

      addr_orig = addr_orig_next;

      if (ret != 0)
	return ret;
    }

  return 0;
}
int
aarch64_handle_watchpoint (enum target_hw_bp_type type, CORE_ADDR addr,
			   int len, int is_insert,
			   struct aarch64_debug_reg_state *state)
{
  if (aarch64_point_is_aligned (1 /* is_watchpoint */ , addr, len))
    return aarch64_handle_aligned_watchpoint (type, addr, len, is_insert,
					      state);
  else
    return aarch64_handle_unaligned_watchpoint (type, addr, len, is_insert,
						state);
}
/* Call ptrace to set the thread TID's hardware breakpoint/watchpoint
   registers with data from *STATE.  */

void
aarch64_linux_set_debug_regs (struct aarch64_debug_reg_state *state,
			      int tid, int watchpoint)
{
  int i, count;
  struct iovec iov;
  struct user_hwdebug_state regs;
  const CORE_ADDR *addr;
  const unsigned int *ctrl;

  memset (&regs, 0, sizeof (regs));
  iov.iov_base = &regs;
  count = watchpoint ? aarch64_num_wp_regs : aarch64_num_bp_regs;
  addr = watchpoint ? state->dr_addr_wp : state->dr_addr_bp;
  ctrl = watchpoint ? state->dr_ctrl_wp : state->dr_ctrl_bp;
  if (count == 0)
    return;
  iov.iov_len = (offsetof (struct user_hwdebug_state, dbg_regs)
		 + count * sizeof (regs.dbg_regs[0]));
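
  /* Only the register pairs that are actually in use are transferred;
     the iov_len set above tells the kernel how much of the regset
     payload to read.  */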
  for (i = 0; i < count; i++)
    {
      regs.dbg_regs[i].addr = addr[i];
      regs.dbg_regs[i].ctrl = ctrl[i];
    }

  if (ptrace (PTRACE_SETREGSET, tid,
	      watchpoint ? NT_ARM_HW_WATCH : NT_ARM_HW_BREAK,
	      (void *) &iov))
    {
      /* Handle Linux kernels with the PR external/20207 bug.  */
      if (watchpoint && errno == EINVAL
	  && kernel_supports_any_contiguous_range)
	{
	  kernel_supports_any_contiguous_range = false;
	  aarch64_downgrade_regs (state);
	  aarch64_linux_set_debug_regs (state, tid, watchpoint);
	  return;
	}

      error (_("Unexpected error setting hardware debug registers"));
    }
}
/* See nat/aarch64-linux-hw-point.h.  */

bool
aarch64_linux_any_set_debug_regs_state (aarch64_debug_reg_state *state,
					bool watchpoint)
{
  int count = watchpoint ? aarch64_num_wp_regs : aarch64_num_bp_regs;
  if (count == 0)
    return false;

  const CORE_ADDR *addr = watchpoint ? state->dr_addr_wp : state->dr_addr_bp;
  const unsigned int *ctrl = watchpoint ? state->dr_ctrl_wp : state->dr_ctrl_bp;

  for (int i = 0; i < count; i++)
    if (addr[i] != 0 || ctrl[i] != 0)
      return true;

  return false;
}
/* Print the values of the cached breakpoint/watchpoint registers.  */

void
aarch64_show_debug_reg_state (struct aarch64_debug_reg_state *state,
			      const char *func, CORE_ADDR addr,
			      int len, enum target_hw_bp_type type)
{
  int i;

  debug_printf ("%s", func);

  if (addr || len)
    debug_printf (" (addr=0x%08lx, len=%d, type=%s)",
		  (unsigned long) addr, len,
		  type == hw_write ? "hw-write-watchpoint"
		  : (type == hw_read ? "hw-read-watchpoint"
		     : (type == hw_access ? "hw-access-watchpoint"
			: (type == hw_execute ? "hw-breakpoint"
			   : "??unknown??"))));
  debug_printf (":\n");

  debug_printf ("\tBREAKPOINTs:\n");
  for (i = 0; i < aarch64_num_bp_regs; i++)
    debug_printf ("\tBP%d: addr=%s, ctrl=0x%08x, ref.count=%d\n",
		  i, core_addr_to_string_nz (state->dr_addr_bp[i]),
		  state->dr_ctrl_bp[i], state->dr_ref_count_bp[i]);

  debug_printf ("\tWATCHPOINTs:\n");
  for (i = 0; i < aarch64_num_wp_regs; i++)
    debug_printf ("\tWP%d: addr=%s (orig=%s), ctrl=0x%08x, ref.count=%d\n",
		  i, core_addr_to_string_nz (state->dr_addr_wp[i]),
		  core_addr_to_string_nz (state->dr_addr_orig_wp[i]),
		  state->dr_ctrl_wp[i], state->dr_ref_count_wp[i]);
}
/* Get the hardware debug register capacity information from the
   process represented by TID.  */

void
aarch64_linux_get_debug_reg_capacity (int tid)
{
  struct iovec iov;
  struct user_hwdebug_state dreg_state;

  iov.iov_base = &dreg_state;
  iov.iov_len = sizeof (dreg_state);

  /* Get hardware watchpoint register info.  */
  if (ptrace (PTRACE_GETREGSET, tid, NT_ARM_HW_WATCH, &iov) == 0
      && (AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8
	  || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_1
	  || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_2))
    {
      aarch64_num_wp_regs = AARCH64_DEBUG_NUM_SLOTS (dreg_state.dbg_info);
      if (aarch64_num_wp_regs > AARCH64_HWP_MAX_NUM)
	{
	  warning (_("Unexpected number of hardware watchpoint registers"
		     " reported by ptrace, got %d, expected %d."),
		   aarch64_num_wp_regs, AARCH64_HWP_MAX_NUM);
	  aarch64_num_wp_regs = AARCH64_HWP_MAX_NUM;
	}
    }
  else
    {
      warning (_("Unable to determine the number of hardware watchpoints"
		 " available."));
      aarch64_num_wp_regs = 0;
    }

  /* Get hardware breakpoint register info.  */
  if (ptrace (PTRACE_GETREGSET, tid, NT_ARM_HW_BREAK, &iov) == 0
      && (AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8
	  || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_1
	  || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_2))
    {
      aarch64_num_bp_regs = AARCH64_DEBUG_NUM_SLOTS (dreg_state.dbg_info);
      if (aarch64_num_bp_regs > AARCH64_HBP_MAX_NUM)
	{
	  warning (_("Unexpected number of hardware breakpoint registers"
		     " reported by ptrace, got %d, expected %d."),
		   aarch64_num_bp_regs, AARCH64_HBP_MAX_NUM);
	  aarch64_num_bp_regs = AARCH64_HBP_MAX_NUM;
	}
    }
  else
    {
      warning (_("Unable to determine the number of hardware breakpoints"
		 " available."));
      aarch64_num_bp_regs = 0;
    }
}
/* Return true if we can watch a memory region that starts at address
   ADDR and whose length is LEN in bytes.  */

int
aarch64_linux_region_ok_for_watchpoint (CORE_ADDR addr, int len)
{
  CORE_ADDR aligned_addr;

  /* Can not set watchpoints for zero or negative lengths.  */
  if (len <= 0)
    return 0;

  /* Must have hardware watchpoint debug register(s).  */
  if (aarch64_num_wp_regs == 0)
    return 0;

  /* We support unaligned watchpoint addresses and arbitrary lengths,
     as long as the size of the whole watched area after alignment
     doesn't exceed the size of the total area that all watchpoint debug
     registers can watch cooperatively.

     This is a very relaxed rule, but unfortunately there are
     limitations, e.g. false-positive hits, due to limited support of
     hardware debug registers in the kernel.  See the comment above
     aarch64_align_watchpoint for more information.  */
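
  /* For example, if aarch64_num_wp_regs is 4 and each register covers
     AARCH64_HWP_MAX_LEN_PER_REG (8) bytes, a request for 27 bytes at
     0x1005 aligns down to 0x1000 and is accepted, since
     0x1000 + 4 * 8 = 0x1020 >= 0x1005 + 27 = 0x1020.  */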
  aligned_addr = addr & ~(AARCH64_HWP_MAX_LEN_PER_REG - 1);
  if (aligned_addr + aarch64_num_wp_regs * AARCH64_HWP_MAX_LEN_PER_REG
      < addr + len)
    return 0;

  /* All tests passed so we are likely to be able to set the watchpoint.
     The reason that it is 'likely' rather than 'must' is because
     we don't check the current usage of the watchpoint registers, and
     there may not be enough registers available for this watchpoint.
     Ideally we should check the cached debug register state, however
     the checking is costly.  */
  return 1;
}