/* Copyright (C) 2009-2023 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "gdbsupport/common-defs.h"
#include "gdbsupport/break-common.h"
#include "gdbsupport/common-regcache.h"
#include "aarch64-hw-point.h"

#ifdef __linux__
/* For kernel_supports_any_contiguous_range.  */
#include "aarch64-linux-hw-point.h"
#else
#define kernel_supports_any_contiguous_range true
#endif

/* Number of hardware breakpoints/watchpoints the target supports.
   They are initialized with values obtained via ptrace.  */

int aarch64_num_bp_regs;
int aarch64_num_wp_regs;

/* Return starting byte 0..7 incl. of a watchpoint encoded by CTRL.  */

unsigned int
aarch64_watchpoint_offset (unsigned int ctrl)
{
  uint8_t mask = DR_CONTROL_MASK (ctrl);
  unsigned retval;

  /* Shift out bottom zeros.  */
  for (retval = 0; mask && (mask & 1) == 0; ++retval)
    mask >>= 1;

  return retval;
}

/* Utility function that returns the length in bytes of a watchpoint
   according to the content of a hardware debug control register CTRL.
   Any contiguous range of bytes in CTRL is supported.  The returned
   value can be between 0..8 (inclusive).  */

unsigned int
aarch64_watchpoint_length (unsigned int ctrl)
{
  uint8_t mask = DR_CONTROL_MASK (ctrl);
  unsigned retval;

  /* Shift out bottom zeros.  */
  mask >>= aarch64_watchpoint_offset (ctrl);

  /* Count bottom ones.  */
  for (retval = 0; (mask & 1) != 0; ++retval)
    mask >>= 1;

  if (mask != 0)
    error (_("Unexpected hardware watchpoint length register value 0x%x"),
           DR_CONTROL_MASK (ctrl));

  return retval;
}

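/* For illustration: a control register whose BAS field (as extracted
   by DR_CONTROL_MASK) is 0x0c, i.e. binary 00001100, has two trailing
   zero bits followed by two set bits, so aarch64_watchpoint_offset
   returns 2 and aarch64_watchpoint_length returns 2: the watchpoint
   covers bytes 2 and 3 of its doubleword.  */
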
/* Given the hardware breakpoint or watchpoint type TYPE, the byte
   OFFSET of the watched region within its doubleword, and its length
   LEN, return the expected encoding for a hardware
   breakpoint/watchpoint control register.  */

static unsigned int
aarch64_point_encode_ctrl_reg (enum target_hw_bp_type type, int offset, int len)
{
  unsigned int ctrl, ttype;

  gdb_assert (offset == 0 || kernel_supports_any_contiguous_range);
  gdb_assert (offset + len <= AARCH64_HWP_MAX_LEN_PER_REG);

  /* type */
  switch (type)
    {
    case hw_write:
      ttype = 2;
      break;
    case hw_read:
      ttype = 1;
      break;
    case hw_access:
      ttype = 3;
      break;
    case hw_execute:
      ttype = 0;
      break;
    default:
      perror_with_name (_("Unrecognized breakpoint/watchpoint type"));
    }

  ctrl = ttype << 3;

  /* offset and length bitmask */
  ctrl |= ((1 << len) - 1) << (5 + offset);
  /* enabled at el0 */
  ctrl |= (2 << 1) | 1;

  return ctrl;
}

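/* For illustration, with the encoding above a 4-byte write watchpoint
   starting at byte offset 0 of its doubleword is encoded as:
   ttype 2 in bits [4:3] (0x10), BAS 0x0f in bits [12:5] (0x1e0), and
   the enabled-at-EL0 bits 0b101 in bits [2:0] (0x5), giving a control
   value of 0x1f5.  */
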
/* Addresses to be written to the hardware breakpoint and watchpoint
   value registers need to be aligned; the alignment is 4-byte and
   8-byte, respectively.  The Linux kernel rejects any non-aligned
   address it receives from the related ptrace call.  Furthermore, the
   kernel currently only supports the following Byte Address Select
   (BAS) values: 0x1, 0x3, 0xf and 0xff, which means that for a
   hardware watchpoint to be accepted by the kernel (via ptrace call),
   its valid length can only be 1 byte, 2 bytes, 4 bytes or 8 bytes.
   Despite these limitations, unaligned watchpoints are supported in
   this port.

   Return 0 for any non-compliant ADDR and/or LEN; return 1 otherwise.  */

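/* For example, with an 8-byte-aligned ADDR, a LEN of 3 passes the
   check below only when the kernel supports arbitrary contiguous BAS
   ranges; otherwise only LEN values of 1, 2, 4 and 8 are accepted
   here, and other lengths are handled via the unaligned-watchpoint
   path instead.  */
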
static int
aarch64_point_is_aligned (ptid_t ptid, int is_watchpoint, CORE_ADDR addr,
                          int len)
{
  unsigned int alignment = 0;

  if (is_watchpoint)
    alignment = AARCH64_HWP_ALIGNMENT;
  else
    {
      struct regcache *regcache
        = get_thread_regcache_for_ptid (ptid);

      /* Set alignment to 2 only if the current process is 32-bit,
         since Thumb instructions can be 2-byte aligned.  Otherwise,
         set alignment to AARCH64_HBP_ALIGNMENT.  */
      if (regcache_register_size (regcache, 0) == 8)
        alignment = AARCH64_HBP_ALIGNMENT;
      else
        alignment = 2;
    }

  if (addr & (alignment - 1))
    return 0;

  if ((!kernel_supports_any_contiguous_range
       && len != 8 && len != 4 && len != 2 && len != 1)
      || (kernel_supports_any_contiguous_range
          && (len < 1 || len > 8)))
    return 0;

  return 1;
}

/* Given the (potentially unaligned) watchpoint address in ADDR and
   length in LEN, return the aligned address, offset from that base
   address, and aligned length in *ALIGNED_ADDR_P, *ALIGNED_OFFSET_P
   and *ALIGNED_LEN_P, respectively.  The returned values will be
   valid values to write to the hardware watchpoint value and control
   registers.

   The given watchpoint may get truncated if more than one hardware
   register is needed to cover the watched region.  *NEXT_ADDR_P
   and *NEXT_LEN_P, if non-NULL, will return the address and length
   of the remaining part of the watchpoint (which can be processed
   by calling this routine again to generate another aligned address,
   offset and length tuple).

   Essentially, an unaligned watchpoint is achieved by minimally
   enlarging the watched area to meet the alignment requirement, and
   if necessary, splitting the watchpoint over several hardware
   watchpoint registers.

   On kernels that predate the support for Byte Address Select (BAS)
   in the hardware watchpoint control register, the offset from the
   base address is always zero, and so in that case the trade-off is
   that there will be false-positive hits for the read-type or the
   access-type hardware watchpoints; for the write type, which is more
   commonly used, there will be no such issues, as the higher-level
   breakpoint management in gdb always examines the exact watched
   region for any content change, and transparently resumes a thread
   from a watchpoint trap if there is no change to the watched region.

   Another limitation is that because the watched region is enlarged,
   the watchpoint fault address discovered by
   aarch64_stopped_data_address may be outside of the original watched
   region, especially when the triggering instruction is accessing a
   larger region.  When the fault address is not within any known
   range, watchpoints_triggered in gdb will get confused, as the
   higher-level watchpoint management is only aware of the original
   watched regions, and will think that some unknown watchpoint has
   been triggered.  To prevent such a case, the
   aarch64_stopped_data_address implementations in gdb and gdbserver
   try to match the trapped address with a watched region, and return
   an address within the latter.  */

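/* As a worked example (the addresses are made up for illustration):
   watching 4 bytes at address 0x1006, with 8-byte alignment, splits
   across two registers.  The first call returns aligned_addr 0x1000
   with offset 6 and length 2 (or offset 0 and length 8 on kernels
   without contiguous-BAS support), leaving next_addr 0x1008 and
   next_len 2; the second call returns aligned_addr 0x1008 with
   offset 0 and length 2, leaving next_len 0.  */
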
static void
aarch64_align_watchpoint (CORE_ADDR addr, int len, CORE_ADDR *aligned_addr_p,
                          int *aligned_offset_p, int *aligned_len_p,
                          CORE_ADDR *next_addr_p, int *next_len_p,
                          CORE_ADDR *next_addr_orig_p)
{
  int aligned_len;
  unsigned int offset, aligned_offset;
  CORE_ADDR aligned_addr;
  const unsigned int alignment = AARCH64_HWP_ALIGNMENT;
  const unsigned int max_wp_len = AARCH64_HWP_MAX_LEN_PER_REG;

  /* As assumed by the algorithm.  */
  gdb_assert (alignment == max_wp_len);

  if (len <= 0)
    return;

  /* The address put into the hardware watchpoint value register must
     be aligned.  */
  offset = addr & (alignment - 1);
  aligned_addr = addr - offset;
  aligned_offset
    = kernel_supports_any_contiguous_range ? addr & (alignment - 1) : 0;

  gdb_assert (offset >= 0 && offset < alignment);
  gdb_assert (aligned_addr >= 0 && aligned_addr <= addr);
  gdb_assert (offset + len > 0);

  if (offset + len >= max_wp_len)
    {
      /* Need more than one watchpoint register; truncate at the
         alignment boundary.  */
      aligned_len
        = max_wp_len - (kernel_supports_any_contiguous_range ? offset : 0);
      len -= (max_wp_len - offset);
      addr += (max_wp_len - offset);
      gdb_assert ((addr & (alignment - 1)) == 0);
    }
  else
    {
      /* Find the smallest valid length that is large enough to
         accommodate this watchpoint.  */
      static const unsigned char
        aligned_len_array[AARCH64_HWP_MAX_LEN_PER_REG] =
        { 1, 2, 4, 4, 8, 8, 8, 8 };

      aligned_len = (kernel_supports_any_contiguous_range
                     ? len : aligned_len_array[offset + len - 1]);
      addr += len;
      len = 0;
    }

  if (aligned_addr_p)
    *aligned_addr_p = aligned_addr;
  if (aligned_offset_p)
    *aligned_offset_p = aligned_offset;
  if (aligned_len_p)
    *aligned_len_p = aligned_len;
  if (next_addr_p)
    *next_addr_p = addr;
  if (next_len_p)
    *next_len_p = len;
  if (next_addr_orig_p)
    *next_addr_orig_p = align_down (*next_addr_orig_p + alignment, alignment);
}

/* Record the insertion of one breakpoint/watchpoint, as represented
   by ADDR and CTRL, in the process' arch-specific data area *STATE.  */

static int
aarch64_dr_state_insert_one_point (ptid_t ptid,
                                   struct aarch64_debug_reg_state *state,
                                   enum target_hw_bp_type type,
                                   CORE_ADDR addr, int offset, int len,
                                   CORE_ADDR addr_orig)
{
  int i, idx, num_regs, is_watchpoint;
  unsigned int ctrl, *dr_ctrl_p, *dr_ref_count;
  CORE_ADDR *dr_addr_p, *dr_addr_orig_p;

  /* Set up state pointers.  */
  is_watchpoint = (type != hw_execute);
  gdb_assert (aarch64_point_is_aligned (ptid, is_watchpoint, addr, len));
  if (is_watchpoint)
    {
      num_regs = aarch64_num_wp_regs;
      dr_addr_p = state->dr_addr_wp;
      dr_addr_orig_p = state->dr_addr_orig_wp;
      dr_ctrl_p = state->dr_ctrl_wp;
      dr_ref_count = state->dr_ref_count_wp;
    }
  else
    {
      num_regs = aarch64_num_bp_regs;
      dr_addr_p = state->dr_addr_bp;
      dr_addr_orig_p = nullptr;
      dr_ctrl_p = state->dr_ctrl_bp;
      dr_ref_count = state->dr_ref_count_bp;
    }

  ctrl = aarch64_point_encode_ctrl_reg (type, offset, len);

  /* Find an existing or free register in our cache.  */
  idx = -1;
  for (i = 0; i < num_regs; ++i)
    {
      if ((dr_ctrl_p[i] & 1) == 0)
        {
          gdb_assert (dr_ref_count[i] == 0);
          idx = i;
          /* no break; continue hunting for an existing one.  */
        }
      else if (dr_addr_p[i] == addr
               && (dr_addr_orig_p == nullptr || dr_addr_orig_p[i] == addr_orig)
               && dr_ctrl_p[i] == ctrl)
        {
          gdb_assert (dr_ref_count[i] != 0);
          idx = i;
          break;
        }
    }

  /* No space.  */
  if (idx == -1)
    return -1;

  /* Update our cache.  */
  if ((dr_ctrl_p[idx] & 1) == 0)
    {
      /* new entry */
      dr_addr_p[idx] = addr;
      if (dr_addr_orig_p != nullptr)
        dr_addr_orig_p[idx] = addr_orig;
      dr_ctrl_p[idx] = ctrl;
      dr_ref_count[idx] = 1;
      /* Notify the change.  */
      aarch64_notify_debug_reg_change (ptid, is_watchpoint, idx);
    }
  else
    {
      /* existing entry */
      dr_ref_count[idx]++;
    }

  return 0;
}

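/* Note on the reference counting above: a second insertion of an
   identical breakpoint/watchpoint (same ADDR, ADDR_ORIG and CTRL)
   reuses the register already programmed for it and only increments
   dr_ref_count, so the hardware slot is released again only after a
   matching number of removals below.  */
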
/* Record the removal of one breakpoint/watchpoint, as represented by
   ADDR and CTRL, in the process' arch-specific data area *STATE.  */

static int
aarch64_dr_state_remove_one_point (ptid_t ptid,
                                   struct aarch64_debug_reg_state *state,
                                   enum target_hw_bp_type type,
                                   CORE_ADDR addr, int offset, int len,
                                   CORE_ADDR addr_orig)
{
  int i, num_regs, is_watchpoint;
  unsigned int ctrl, *dr_ctrl_p, *dr_ref_count;
  CORE_ADDR *dr_addr_p, *dr_addr_orig_p;

  /* Set up state pointers.  */
  is_watchpoint = (type != hw_execute);
  if (is_watchpoint)
    {
      num_regs = aarch64_num_wp_regs;
      dr_addr_p = state->dr_addr_wp;
      dr_addr_orig_p = state->dr_addr_orig_wp;
      dr_ctrl_p = state->dr_ctrl_wp;
      dr_ref_count = state->dr_ref_count_wp;
    }
  else
    {
      num_regs = aarch64_num_bp_regs;
      dr_addr_p = state->dr_addr_bp;
      dr_addr_orig_p = nullptr;
      dr_ctrl_p = state->dr_ctrl_bp;
      dr_ref_count = state->dr_ref_count_bp;
    }

  ctrl = aarch64_point_encode_ctrl_reg (type, offset, len);

  /* Find the entry that matches the ADDR and CTRL.  */
  for (i = 0; i < num_regs; ++i)
    if (dr_addr_p[i] == addr
        && (dr_addr_orig_p == nullptr || dr_addr_orig_p[i] == addr_orig)
        && dr_ctrl_p[i] == ctrl)
      {
        gdb_assert (dr_ref_count[i] != 0);
        break;
      }

  /* Not found.  */
  if (i == num_regs)
    return -1;

  /* Clear our cache.  */
  if (--dr_ref_count[i] == 0)
    {
      /* Clear the enable bit.  */
      ctrl &= ~1;
      dr_addr_p[i] = 0;
      if (dr_addr_orig_p != nullptr)
        dr_addr_orig_p[i] = 0;
      dr_ctrl_p[i] = ctrl;
      /* Notify the change.  */
      aarch64_notify_debug_reg_change (ptid, is_watchpoint, i);
    }

  return 0;
}

int
aarch64_handle_breakpoint (enum target_hw_bp_type type, CORE_ADDR addr,
                           int len, int is_insert, ptid_t ptid,
                           struct aarch64_debug_reg_state *state)
{
  if (is_insert)
    {
      /* The hardware breakpoint on AArch64 should always be 4-byte
         aligned, but on AArch32, it can be 2-byte aligned.  Note that
         we only check the alignment when inserting a breakpoint,
         because aarch64_point_is_aligned needs the regcache of the
         inferior identified by PTID to decide whether the inferior is
         32-bit or 64-bit.  However, when GDB follows the parent
         process and detaches breakpoints from the child process, PTID
         is the child's ptid, but the child inferior doesn't exist in
         GDB's view yet.  */
      if (!aarch64_point_is_aligned (ptid, 0 /* is_watchpoint */ , addr, len))
        return -1;

      return aarch64_dr_state_insert_one_point (ptid, state, type, addr, 0,
                                                len, -1);
    }
  else
    return aarch64_dr_state_remove_one_point (ptid, state, type, addr, 0, len,
                                              -1);
}

/* This is essentially the same as aarch64_handle_breakpoint, except
   that it handles an aligned watchpoint.  */

static int
aarch64_handle_aligned_watchpoint (enum target_hw_bp_type type,
                                   CORE_ADDR addr, int len, int is_insert,
                                   ptid_t ptid,
                                   struct aarch64_debug_reg_state *state)
{
  if (is_insert)
    return aarch64_dr_state_insert_one_point (ptid, state, type, addr, 0, len,
                                              addr);
  else
    return aarch64_dr_state_remove_one_point (ptid, state, type, addr, 0, len,
                                              addr);
}

/* Insert or remove an unaligned watchpoint by calling
   aarch64_align_watchpoint repeatedly until the whole watched region,
   as represented by ADDR and LEN, has been properly aligned and is
   ready to be written to one or more hardware watchpoint registers.
   IS_INSERT indicates whether this is an insertion or a removal.
   Return 0 on success.  */

static int
aarch64_handle_unaligned_watchpoint (enum target_hw_bp_type type,
                                     CORE_ADDR addr, int len, int is_insert,
                                     ptid_t ptid,
                                     struct aarch64_debug_reg_state *state)
{
  CORE_ADDR addr_orig = addr;

  while (len > 0)
    {
      CORE_ADDR aligned_addr;
      int aligned_offset, aligned_len, ret;
      CORE_ADDR addr_orig_next = addr_orig;

      aarch64_align_watchpoint (addr, len, &aligned_addr, &aligned_offset,
                                &aligned_len, &addr, &len, &addr_orig_next);

      if (is_insert)
        ret = aarch64_dr_state_insert_one_point (ptid, state, type,
                                                 aligned_addr, aligned_offset,
                                                 aligned_len, addr_orig);
      else
        ret = aarch64_dr_state_remove_one_point (ptid, state, type,
                                                 aligned_addr, aligned_offset,
                                                 aligned_len, addr_orig);

      if (show_debug_regs)
        debug_printf ("handle_unaligned_watchpoint: is_insert: %d\n"
                      "                             "
                      "aligned_addr: %s, aligned_len: %d\n"
                      "                             "
                      "addr_orig: %s\n"
                      "                             "
                      "next_addr: %s, next_len: %d\n"
                      "                             "
                      "addr_orig_next: %s\n",
                      is_insert, core_addr_to_string_nz (aligned_addr),
                      aligned_len, core_addr_to_string_nz (addr_orig),
                      core_addr_to_string_nz (addr), len,
                      core_addr_to_string_nz (addr_orig_next));

      addr_orig = addr_orig_next;

      if (ret != 0)
        return ret;
    }

  return 0;
}

int
aarch64_handle_watchpoint (enum target_hw_bp_type type, CORE_ADDR addr,
                           int len, int is_insert, ptid_t ptid,
                           struct aarch64_debug_reg_state *state)
{
  if (aarch64_point_is_aligned (ptid, 1 /* is_watchpoint */ , addr, len))
    return aarch64_handle_aligned_watchpoint (type, addr, len, is_insert, ptid,
                                              state);
  else
    return aarch64_handle_unaligned_watchpoint (type, addr, len, is_insert,
                                                ptid, state);
}

/* See nat/aarch64-hw-point.h.  */

bool
aarch64_any_set_debug_regs_state (aarch64_debug_reg_state *state,
                                  bool watchpoint)
{
  int count = watchpoint ? aarch64_num_wp_regs : aarch64_num_bp_regs;
  if (count == 0)
    return false;

  const CORE_ADDR *addr = watchpoint ? state->dr_addr_wp : state->dr_addr_bp;
  const unsigned int *ctrl = watchpoint ? state->dr_ctrl_wp : state->dr_ctrl_bp;

  for (int i = 0; i < count; i++)
    if (addr[i] != 0 || ctrl[i] != 0)
      return true;

  return false;
}

/* Print the values of the cached breakpoint/watchpoint registers.  */

void
aarch64_show_debug_reg_state (struct aarch64_debug_reg_state *state,
                              const char *func, CORE_ADDR addr,
                              int len, enum target_hw_bp_type type)
{
  int i;

  debug_printf ("%s", func);
  if (addr || len)
    debug_printf (" (addr=0x%08lx, len=%d, type=%s)",
                  (unsigned long) addr, len,
                  type == hw_write ? "hw-write-watchpoint"
                  : (type == hw_read ? "hw-read-watchpoint"
                     : (type == hw_access ? "hw-access-watchpoint"
                        : (type == hw_execute ? "hw-breakpoint"
                           : "??unknown??"))));
  debug_printf (":\n");

  debug_printf ("\tBREAKPOINTs:\n");
  for (i = 0; i < aarch64_num_bp_regs; i++)
    debug_printf ("\tBP%d: addr=%s, ctrl=0x%08x, ref.count=%d\n",
                  i, core_addr_to_string_nz (state->dr_addr_bp[i]),
                  state->dr_ctrl_bp[i], state->dr_ref_count_bp[i]);

  debug_printf ("\tWATCHPOINTs:\n");
  for (i = 0; i < aarch64_num_wp_regs; i++)
    debug_printf ("\tWP%d: addr=%s (orig=%s), ctrl=0x%08x, ref.count=%d\n",
                  i, core_addr_to_string_nz (state->dr_addr_wp[i]),
                  core_addr_to_string_nz (state->dr_addr_orig_wp[i]),
                  state->dr_ctrl_wp[i], state->dr_ref_count_wp[i]);
}

/* Return true if we can watch a memory region that starts at address
   ADDR and whose length is LEN in bytes.  */

int
aarch64_region_ok_for_watchpoint (CORE_ADDR addr, int len)
{
  CORE_ADDR aligned_addr;

  /* Cannot set watchpoints for zero or negative lengths.  */
  if (len <= 0)
    return 0;

  /* Must have hardware watchpoint debug register(s).  */
  if (aarch64_num_wp_regs == 0)
    return 0;

  /* We support unaligned watchpoint addresses and arbitrary lengths,
     as long as the size of the whole watched area after alignment
     doesn't exceed the size of the total area that all watchpoint
     debug registers can watch cooperatively.

     This is a very relaxed rule, but unfortunately there are
     limitations, e.g. false-positive hits, due to limited support of
     hardware debug registers in the kernel.  See comment above
     aarch64_align_watchpoint for more information.  */

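  /* For example, assuming four watchpoint registers (the actual count
     is target-specific and only known at run time): a request for
     addr = 0x100f, len = 20 gives aligned_addr = 0x1008 below, and
     0x1008 + 4 * 8 = 0x1028 >= 0x100f + 20 = 0x1023, so the region is
     accepted.  */
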
  aligned_addr = addr & ~(AARCH64_HWP_MAX_LEN_PER_REG - 1);
  if (aligned_addr + aarch64_num_wp_regs * AARCH64_HWP_MAX_LEN_PER_REG
      < addr + len)
    return 0;

  /* All tests passed, so we are likely to be able to set the
     watchpoint.  It is 'likely' rather than certain because we do not
     check the current usage of the watchpoint registers here, and
     there may not be enough of them available for this watchpoint.
     Ideally we should check the cached debug register state, but that
     checking is costly.  */
  return 1;
}