/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2024 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "extract-store-integer.h"
#include "frame.h"
#include "language.h"
#include "cli/cli-cmds.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2/frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "ax-gdb.h"
#include "gdbsupport/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"
#include "arch/aarch64-mte.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "gdbarch.h"

#include "opcode/aarch64.h"
#include <algorithm>
#include <unordered_map>

/* For inferior_ptid and current_inferior ().  */
#include "inferior.h"
/* For std::sqrt and std::pow.  */
#include <cmath>

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS		4

/* All possible aarch64 target descriptors.  */
static std::unordered_map <aarch64_features, target_desc *> tdesc_aarch64_map;
/* The standard register names, and all the valid aliases for them.
   We're not adding fp here, that name is already taken, see
   _initialize_frame_reg.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* Link register alias for x30.  */
  {"lr", AARCH64_LR_REGNUM},
  /* SP is the canonical name for x31 according to aarch64_r_register_names,
     so we're adding an x31 alias for sp.  */
  {"x31", AARCH64_SP_REGNUM},
  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};
static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer, low half/user pointers.  */
  "pauth_dmask",
  /* Authentication mask for code pointer, low half/user pointers.  */
  "pauth_cmask",
  /* Authentication mask for data pointer, high half/kernel pointers.  */
  "pauth_dmask_high",
  /* Authentication mask for code pointer, high half/kernel pointers.  */
  "pauth_cmask_high"
};

static const char *const aarch64_mte_register_names[] =
{
  /* Tag Control Register.  */
  "tag_ctl"
};

static int aarch64_stack_frame_destroyed_p (struct gdbarch *, CORE_ADDR);
/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  trad_frame_saved_reg *saved_regs;
};
/* Holds information used to read/write from/to ZA
   pseudo-registers.

   With this information, the read/write code can be simplified so it
   deals only with the required information to map a ZA pseudo-register
   to the exact bytes into the ZA contents buffer.  Otherwise we'd need
   to use a lot of conditionals.  */

struct za_offsets
{
  /* Offset, into ZA, of the starting byte of the pseudo-register.  */
  size_t starting_offset;
  /* The size of the contiguous chunks of the pseudo-register.  */
  size_t chunk_size;
  /* The number of pseudo-register chunks contained in ZA.  */
  size_t chunks;
  /* The offset between each contiguous chunk.  */
  size_t stride_size;
};
/* Holds data that is helpful to determine the individual fields that make
   up the names of the ZA pseudo-registers.  It is also very helpful to
   determine offsets, stride and sizes for reading ZA tiles and tile
   slices.  */

struct za_pseudo_encoding
{
  /* The slice index (0 ~ svl).  Only used for tile slices.  */
  uint8_t slice_index;
  /* The tile number (0 ~ 15).  */
  uint8_t tile_index;
  /* Direction (horizontal/vertical).  Only used for tile slices.  */
  bool horizontal;
  /* Qualifier index (0 ~ 4).  These map to B, H, S, D and Q.  */
  uint8_t qualifier_index;
};
static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("AArch64 debugging is %s.\n"), value);
}
namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace
/* If address signing is enabled, mask off the signature bits from the link
   register, which is passed by value in ADDR, using the register values in
   THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_lr (aarch64_gdbarch_tdep *tdep,
			 const frame_info_ptr &this_frame, CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
					 tdep->ra_sign_state_regnum))
    {
      /* VA range select (bit 55) tells us whether to use the low half masks
	 or the high half masks.  */
      int cmask_num;
      if (tdep->pauth_reg_count > 2 && addr & VA_RANGE_SELECT_BIT_MASK)
	cmask_num = AARCH64_PAUTH_CMASK_HIGH_REGNUM (tdep->pauth_reg_base);
      else
	cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);

      /* By default, we assume TBI and discard the top 8 bits plus the VA
	 range select bit (55).  */
      CORE_ADDR mask = AARCH64_TOP_BITS_MASK;
      mask |= frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = aarch64_remove_top_bits (addr, mask);

      /* Record in the frame that the link register required unmasking.  */
      set_frame_previous_pc_masked (this_frame);
    }

  return addr;
}
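/* For illustration (values are an assumption, the real masks come from the
   target): on a Linux target with 48-bit virtual addresses the code
   authentication mask typically covers bits 54:48, i.e. 0x007f000000000000.
   ORing that into AARCH64_TOP_BITS_MASK (the top byte plus bit 55) covers
   every bit a PAC signature may occupy, and aarch64_remove_top_bits then
   clears those bits (or sets them, for high-half kernel addresses) to
   recover the original link register value.  */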
299 /* Implement the "get_pc_address_flags" gdbarch method. */
301 static std::string
302 aarch64_get_pc_address_flags (const frame_info_ptr &frame, CORE_ADDR pc)
304 if (pc != 0 && get_frame_pc_masked (frame))
305 return "PAC";
307 return "";
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;

  /* Whether the stack has been set.  This should be true when we notice a SP
     to FP move or if we are using the SP as the base register for storing
     data, in case the FP is omitted.  */
  bool seen_stack_set = false;

  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	break;
      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	    }
	  else
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
	    }

	  /* Did we move SP to FP?  */
	  if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == addsub_ext
	       && strcmp ("sub", inst.opcode->name) == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_EXT);

	  regs[rd] = pv_subtract (regs[rn], regs[rm]);
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  unsigned rd = inst.operands[0].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_HALF);
	  gdb_assert (inst.operands[1].shifter.kind == AARCH64_MOD_LSL);

	  /* If this shows up before we set the stack, keep going.  Otherwise
	     stop the analysis.  */
	  if (seen_stack_set)
	    break;

	  regs[rd] = pv_constant (inst.operands[1].imm.value
				  << inst.operands[1].shifter.amount);
	}
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up "
				    "addr=%s opcode=0x%x (orr x register)",
				    core_addr_to_string_nz (start), insn);

	      break;
	    }
	}
      else if (inst.opcode->op == OP_STUR)
	{
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  stack.store
	    (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
	     size, regs[rt]);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
	    break;

	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
	  stack.store (pv_add_constant (regs[rn], imm + size), size,
		       regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Ignore the instruction that allocates stack space and sets
	     the SP.  */
	  if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	{
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    rt += AARCH64_X_REGISTER_COUNT;

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == ic_system)
	{
	  aarch64_gdbarch_tdep *tdep
	    = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
	  int ra_state_val = 0;

	  if (insn == 0xd503233f /* paciasp.  */
	      || insn == 0xd503237f  /* pacibsp.  */)
	    {
	      /* Return addresses are mangled.  */
	      ra_state_val = 1;
	    }
	  else if (insn == 0xd50323bf /* autiasp.  */
		   || insn == 0xd50323ff  /* autibsp.  */)
	    {
	      /* Return addresses are not mangled.  */
	      ra_state_val = 0;
	    }
	  else if (IS_BTI (insn))
	    /* We don't need to do anything special for a BTI instruction.  */
	    continue;
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up addr=%s"
				    " opcode=0x%x (iclass)",
				    core_addr_to_string_nz (start), insn);
	      break;
	    }

	  if (tdep->has_pauth () && cache != nullptr)
	    {
	      int regnum = tdep->ra_sign_state_regnum;
	      cache->saved_regs[regnum].set_value (ra_state_val);
	    }
	}
      else
	{
	  aarch64_debug_printf ("prologue analysis gave up addr=%s"
				" opcode=0x%x",
				core_addr_to_string_nz (start), insn);

	  break;
	}
    }
  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].set_addr (offset);
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
			  &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
    }

  return start;
}
static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
				   reader);
}
#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};
static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		    && cache.saved_regs[regnum].realreg () == regnum);
      }
  }
  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr () == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
	else
	  SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		      && cache.saved_regs[regnum].realreg () == regnum);
      }
  }
  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x910003fd, /* mov     x29, sp */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0x290207e0, /* stp     w0, w1, [sp, #16] */
      0xa9018fe2, /* stp     x2, x3, [sp, #24] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }
  /* Test handling of movz/str when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb9002be4, /* str     w4, [sp, #40] */
      0xf9001be5, /* str     x5, [sp, #48] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb80343e6, /* stur    w6, [sp, #52] */
      0xf80383e7, /* stur    x7, [sp, #56] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }
  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }
  /* Test a prologue in which there is a return address signing
     instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
	0x910003fd, /* mov     x29, sp */
	0xf801c3f3, /* str     x19, [sp, #28] */
	0xb9401fa0, /* ldr     x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr () == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].is_realreg ()
			&& cache.saved_regs[i].realreg () == i);
	}

      if (tdep->has_pauth ())
	{
	  int regnum = tdep->ra_sign_state_regnum;
	  SELF_CHECK (cache.saved_regs[regnum].is_value ());
	}
    }
  /* Test a prologue with a BTI instruction.  */
  {
    static const uint32_t insns[] = {
      0xd503245f, /* bti */
      0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
      0x910003fd, /* mov     x29, sp */
      0xf801c3f3, /* str     x19, [sp, #28] */
      0xb9401fa0, /* ldr     x19, [x29, #28] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
					      reader);

    SELF_CHECK (end == 4 * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -20);
	else if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -40);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */
/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, func_end_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  bool func_addr_found
    = find_pc_partial_function (pc, NULL, &func_addr, &func_end_addr);

  if (func_addr_found)
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
	return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  limit_pc
    = func_end_addr == 0 ? limit_pc : std::min (limit_pc, func_end_addr - 4);

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}
/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (const frame_info_ptr &this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].set_addr (0);
      cache->saved_regs[30].set_addr (8);
    }
}
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (const frame_info_ptr &this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp;
  if (!aarch64_stack_frame_destroyed_p (get_frame_arch (this_frame),
					cache->prev_pc))
    cache->prev_sp += cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (cache->saved_regs[reg].is_addr ())
      cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
				       + cache->prev_sp);

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (const frame_info_ptr &this_frame,
			     void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}
/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (const frame_info_ptr &this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  gdbarch *arch = get_frame_arch (this_frame);
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
  if (cache->prev_pc <= tdep->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}
/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (const frame_info_ptr &this_frame,
			  void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}
/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (const frame_info_ptr &this_frame,
				void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      aarch64_gdbarch_tdep *tdep
	= gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
	  && cache->saved_regs[tdep->ra_sign_state_regnum].is_value ())
	lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.

	 +----------+  ^
	 | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
	 |          |
	 |          |<- SP
	 +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}
/* AArch64 prologue unwinder.  */
static frame_unwind aarch64_prologue_unwind =
{
  "aarch64 prologue",
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (const frame_info_ptr &this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}
/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (const frame_info_ptr &this_frame,
				       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (const frame_info_ptr &this_frame,
		      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}
/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     const frame_info_ptr &this_frame,
			     void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}
/* AArch64 stub unwinder.  */
static frame_unwind aarch64_stub_unwind =
{
  "aarch64 stub",
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (const frame_info_ptr &this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
static frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};
/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (const frame_info_ptr &this_frame,
			      void **this_cache, int regnum)
{
  gdbarch *arch = get_frame_arch (this_frame);
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (_("Unexpected register %d"), regnum);
    }
}

static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;
/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       const frame_info_ptr &this_frame)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->ra_sign_state_regnum)
	{
	  /* Initialize RA_STATE to zero.  */
	  reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
	  reg->loc.exp.start = &op_lit0;
	  reg->loc.exp.len = 1;
	  return;
	}
      else if (regnum >= tdep->pauth_reg_base
	       && regnum < tdep->pauth_reg_base + tdep->pauth_reg_count)
	{
	  reg->how = DWARF2_FRAME_REG_SAME_VALUE;
	  return;
	}
    }
}
/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
				     struct dwarf2_frame_state *fs)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
	return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_RA_SIGN_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_RA_SIGN_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
	  || ra_state->loc.exp.start == &op_lit0)
	ra_state->loc.exp.start = &op_lit1;
      else
	ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}
/* Used for matching BRK instructions for AArch64.  */
static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;
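/* For example, BRK #imm16 is encoded as 0xd4200000 | (imm16 << 5), so
   "brk #0" is 0xd4200000 and "brk #0x3e8" is 0xd4207d00; ANDing either
   with BRK_INSN_MASK strips the immediate field and yields
   BRK_INSN_BASE.  */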
/* Implementation of gdbarch_program_breakpoint_here_p for aarch64.  */

static bool
aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
{
  const uint32_t insn_len = 4;
  gdb_byte target_mem[4];

  /* Enable the automatic memory restoration from breakpoints while
     we read the memory.  Otherwise we may find temporary breakpoints, ones
     inserted by GDB, and flag them as permanent breakpoints.  */
  scoped_restore restore_memory
    = make_scoped_restore_show_memory_breakpoints (0);

  if (target_read_memory (address, target_mem, insn_len) == 0)
    {
      uint32_t insn =
	(uint32_t) extract_unsigned_integer
	  (target_mem, insn_len, gdbarch_byte_order_for_code (gdbarch));

      /* Check if INSN is a BRK instruction pattern.  There are multiple
	 choices of such instructions with different immediate values.
	 Different OS' may use a different variation, but they have the
	 same outcome.  */
      return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
    }

  return false;
}
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};
/* Implement the gdbarch type alignment method, overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
    {
      /* Use the natural alignment for vector types (the same for
	 scalar type), but the maximum alignment is 128-bit.  */
      if (t->length () > 16)
	return 16;
      else
	return t->length ();
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}
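/* For example, an 8-byte vector keeps its natural 8-byte alignment, while
   a 32-byte vector is capped at 16 bytes; any non-vector type returns 0
   here and falls back to the generic alignment code.  */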
/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (type->code ())
    {
    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      if (type->length () > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (type->length () != (*fundamental_type)->length ()
	       || type->code () != (*fundamental_type)->code ())
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	struct type *target_type = check_typedef (type->target_type ());
	if (target_type->length () > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (target_type->length () != (*fundamental_type)->length ()
		 || target_type->code () != (*fundamental_type)->code ())
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (type->is_vector ())
	  {
	    if (type->length () != 8 && type->length () != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (type->length () != (*fundamental_type)->length ()
		     || type->code () != (*fundamental_type)->code ())
	      return -1;

	    return 1;
	  }
	else
	  {
	    struct type *target_type = type->target_type ();
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    count *= (type->length () / target_type->length ());
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	int count = 0;

	for (int i = 0; i < type->num_fields (); i++)
	  {
	    /* Ignore any static fields.  */
	    if (type->field (i).is_static ())
	      continue;

	    struct type *member = check_typedef (type->field (i).type ());

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }

	/* Ensure there is no padding between the fields (allowing for empty
	   zero length structs).  */
	int ftype_length = (*fundamental_type == nullptr)
			   ? 0 : (*fundamental_type)->length ();
	if (count * ftype_length != type->length ())
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}
/* Return true if an argument, whose type is described by TYPE, can be passed
   or returned in simd/fp registers, providing enough parameter passing
   registers are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
				       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
							  fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}
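/* For example, "struct { float x, y, z; }" is an HFA: the recursive scan
   returns a count of 3 with *FUNDAMENTAL_TYPE set to float, which is
   within HA_MAX_NUM_FLDS, so the struct can be passed in three
   consecutive SIMD/FP registers when enough of them are free.  */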
/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = type->length ();
  enum type_code typecode = type->code ();
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = arg->contents ().data ();

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum),
			    phex (regval, X_REGISTER_SIZE));

      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
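/* For example, a 16-byte struct passed with NGRN == 0 is split across two
   registers: bytes 0-7 go in x0 and bytes 8-15 in x1.  Only a trailing
   sub-word chunk of a struct/union needs the big-endian shift above.  */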
/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x () function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb::byte_vector reg (register_size (gdbarch, regnum), 0);
      gdb_assert (len <= reg.size ());

      info->argnum++;
      info->nsrn++;

      /* PCS C.1, the argument is allocated to the least significant
	 bits of the V register.  */
      memcpy (reg.data (), buf, len);
      regcache->cooked_write (regnum, reg);

      aarch64_debug_printf ("arg %d in %s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum));

      return 1;
    }
  info->nsrn = 8;
  return 0;
}
/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       struct value *arg)
{
  const bfd_byte *buf = arg->contents ().data ();
  int len = type->length ();
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
			info->nsaa);

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}
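/* For example, pushing a 12-byte struct when NSAA is 16-byte aligned
   records 12 bytes of data, advances NSAA to 28, and then pushes 4 bytes
   of padding so the next stack slot is again 8-byte aligned.  */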
/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
		    struct aarch64_call_info *info, struct type *type,
		    struct value *arg)
{
  int len = type->length ();
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}
/* Pass a value, which is of type arg_type, in a V register.  Assumes value
   is an aapcs_is_vfp_call_or_return_candidate and there are enough spare V
   registers.  A return value of false is an error state as the value will
   have been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
			 struct aarch64_call_info *info, struct type *arg_type,
			 struct value *arg)
{
  switch (arg_type->code ())
    {
    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      return pass_in_v (gdbarch, regcache, info, arg_type->length (),
			arg->contents ().data ());
      break;

    case TYPE_CODE_COMPLEX:
      {
	const bfd_byte *buf = arg->contents ().data ();
	struct type *target_type = check_typedef (arg_type->target_type ());

	if (!pass_in_v (gdbarch, regcache, info, target_type->length (),
			buf))
	  return false;

	return pass_in_v (gdbarch, regcache, info, target_type->length (),
			  buf + target_type->length ());
      }

    case TYPE_CODE_ARRAY:
      if (arg_type->is_vector ())
	return pass_in_v (gdbarch, regcache, info, arg_type->length (),
			  arg->contents ().data ());
      [[fallthrough]];

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < arg_type->num_fields (); i++)
	{
	  /* Don't include static fields.  */
	  if (arg_type->field (i).is_static ())
	    continue;

	  struct value *field = arg->primitive_field (0, i, arg_type);
	  struct type *field_type = check_typedef (field->type ());

	  if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
					field))
	    return false;
	}
      return true;

    default:
      return false;
    }
}
/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp,
			 function_call_return_method return_method,
			 CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage process.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      aarch64_debug_printf ("struct return in %s = 0x%s",
			    gdbarch_register_name
			      (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
			    paddress (gdbarch, struct_addr));

      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }
1934 for (argnum = 0; argnum < nargs; argnum++)
1936 struct value *arg = args[argnum];
1937 struct type *arg_type, *fundamental_type;
1938 int len, elements;
1940 arg_type = check_typedef (arg->type ());
1941 len = arg_type->length ();
1943 /* If arg can be passed in v registers as per the AAPCS64, then do so if
1944 if there are enough spare registers. */
1945 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1946 &fundamental_type))
1948 if (info.nsrn + elements <= 8)
1950 /* We know that we have sufficient registers available therefore
1951 this will never need to fallback to the stack. */
1952 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1953 arg))
1954 gdb_assert_not_reached ("Failed to push args");
1956 else
1958 info.nsrn = 8;
1959 pass_on_stack (&info, arg_type, arg);
1961 continue;
1964 switch (arg_type->code ())
1966 case TYPE_CODE_INT:
1967 case TYPE_CODE_BOOL:
1968 case TYPE_CODE_CHAR:
1969 case TYPE_CODE_RANGE:
1970 case TYPE_CODE_ENUM:
1971 if (len < 4 && !is_fixed_point_type (arg_type))
1973 /* Promote to 32 bit integer. */
1974 if (arg_type->is_unsigned ())
1975 arg_type = builtin_type (gdbarch)->builtin_uint32;
1976 else
1977 arg_type = builtin_type (gdbarch)->builtin_int32;
1978 arg = value_cast (arg_type, arg);
1980 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1981 break;
1983 case TYPE_CODE_STRUCT:
1984 case TYPE_CODE_ARRAY:
1985 case TYPE_CODE_UNION:
1986 if (len > 16)
1988 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1989 invisible reference. */
1991 /* Allocate aligned storage. */
1992 sp = align_down (sp - len, 16);
1994 /* Write the real data into the stack. */
1995 write_memory (sp, arg->contents ().data (), len);
1997 /* Construct the indirection. */
1998 arg_type = lookup_pointer_type (arg_type);
1999 arg = value_from_pointer (arg_type, sp);
2000 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
2002 else
2003 /* PCS C.15 / C.18 multiple values pass. */
2004 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
2005 break;
2007 default:
2008 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
2009 break;
2013 /* Make sure stack retains 16 byte alignment. */
2014 if (info.nsaa & 15)
2015 sp -= 16 - (info.nsaa & 15);
2017 while (!info.si.empty ())
2019 const stack_item_t &si = info.si.back ();
2021 sp -= si.len;
2022 if (si.data != NULL)
2023 write_memory (sp, si.data, si.len);
2024 info.si.pop_back ();
2027 /* Finally, update the SP register. */
2028 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
2030 return sp;
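
/* As a rough illustration of the placement above, a hypothetical call

     void f (short a, double d, struct vec3 v);

   would have A promoted to a 32-bit integer and passed in the first X
   register, D passed in V0, and V (an HFA of three floats) passed in the
   next three spare V registers, with the NSRN/NSAA counters in
   aarch64_call_info advanced accordingly.  */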

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_bfloat16;
      append_composite_type_field (t, "bf", elem);

      elem = builtin_type (gdbarch)->builtin_half;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}
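
/* These union types are what let each scalar pseudo-register be shown in
   all of its views at once.  For instance, with 1.5f in S0, "print $s0"
   produces output along the lines of

     (gdb) print $s0
     $1 = {f = 1.5, u = 1069547520, s = 1069547520}

   since 0x3fc00000 is the IEEE 754 single-precision encoding of 1.5.  */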

/* Return TRUE if REGNUM is a ZA tile slice pseudo-register number.  Return
   FALSE otherwise.  */

static bool
is_sme_tile_slice_pseudo_register (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  gdb_assert (tdep->has_sme ());
  gdb_assert (tdep->sme_svq > 0);
  gdb_assert (tdep->sme_pseudo_base <= regnum);
  gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);

  if (tdep->sme_tile_slice_pseudo_base <= regnum
      && regnum < (tdep->sme_tile_slice_pseudo_base
                   + tdep->sme_tile_slice_pseudo_count))
    return true;

  return false;
}

/* Given REGNUM, a ZA pseudo-register number, return, in ENCODING, the
   decoded fields that make up its name.  */

static void
aarch64_za_decode_pseudos (struct gdbarch *gdbarch, int regnum,
                           struct za_pseudo_encoding &encoding)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  gdb_assert (tdep->has_sme ());
  gdb_assert (tdep->sme_svq > 0);
  gdb_assert (tdep->sme_pseudo_base <= regnum);
  gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);

  if (is_sme_tile_slice_pseudo_register (gdbarch, regnum))
    {
      /* Calculate the tile slice pseudo-register offset relative to the
         other tile slice pseudo-registers.  */
      int offset = regnum - tdep->sme_tile_slice_pseudo_base;

      /* Fetch the qualifier.  We can have 160 to 2560 possible tile slice
         pseudo-registers.  Each qualifier (we have 5 of them: B, H, S, D
         and Q) covers 32 * svq pseudo-registers, so we divide the offset by
         that constant.  */
      size_t qualifier = offset / (tdep->sme_svq * 32);
      encoding.qualifier_index = qualifier;

      /* Prepare to fetch the direction (d), tile number (t) and slice
         number (s).  */
      int dts = offset % (tdep->sme_svq * 32);

      /* The direction is represented by the even/odd numbers.  Even-numbered
         pseudo-registers are horizontal tile slices and odd-numbered
         pseudo-registers are vertical tile slices.  */
      encoding.horizontal = !(dts & 1);

      /* Fetch the tile number.  The tile number is closely related to the
         qualifier.  B has 1 tile, H has 2 tiles, S has 4 tiles, D has 8
         tiles and Q has 16 tiles.  */
      encoding.tile_index = (dts >> 1) & ((1 << qualifier) - 1);

      /* Fetch the slice number.  The slice number is closely related to the
         qualifier and the svl.  */
      encoding.slice_index = dts >> (qualifier + 1);
    }
  else
    {
      /* Calculate the tile pseudo-register offset relative to the other
         tile pseudo-registers.  */
      int offset = regnum - tdep->sme_tile_pseudo_base;

      encoding.qualifier_index = std::floor (std::log2 (offset + 1));
      /* Calculate the tile number.  */
      encoding.tile_index = (offset + 1) - (1 << encoding.qualifier_index);
      /* Direction and slice index don't get used for tiles.  Set them to
         0/false values.  */
      encoding.slice_index = 0;
      encoding.horizontal = false;
    }
}
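
/* Worked example of the decoding above, assuming svq = 2: each qualifier
   covers 32 * 2 = 64 tile slice pseudo-registers, so a tile slice offset
   of 70 decodes to qualifier 70 / 64 = 1 (H); then dts = 70 % 64 = 6,
   which is even, so the slice is horizontal; the tile number is
   (6 >> 1) & ((1 << 1) - 1) = 1 and the slice number is 6 >> 2 = 1,
   i.e. the pseudo-register named "za1hh1".  */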

/* Return the type for a ZA tile slice pseudo-register based on ENCODING.  */

static struct type *
aarch64_za_tile_slice_type (struct gdbarch *gdbarch,
                            const struct za_pseudo_encoding &encoding)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  gdb_assert (tdep->has_sme ());
  gdb_assert (tdep->sme_svq > 0);

  if (tdep->sme_tile_slice_type_q == nullptr)
    {
      /* Q tile slice type.  */
      tdep->sme_tile_slice_type_q
        = init_vector_type (builtin_type (gdbarch)->builtin_uint128,
                            tdep->sme_svq);
      /* D tile slice type.  */
      tdep->sme_tile_slice_type_d
        = init_vector_type (builtin_type (gdbarch)->builtin_uint64,
                            tdep->sme_svq * 2);
      /* S tile slice type.  */
      tdep->sme_tile_slice_type_s
        = init_vector_type (builtin_type (gdbarch)->builtin_uint32,
                            tdep->sme_svq * 4);
      /* H tile slice type.  */
      tdep->sme_tile_slice_type_h
        = init_vector_type (builtin_type (gdbarch)->builtin_uint16,
                            tdep->sme_svq * 8);
      /* B tile slice type.  */
      tdep->sme_tile_slice_type_b
        = init_vector_type (builtin_type (gdbarch)->builtin_uint8,
                            tdep->sme_svq * 16);
    }

  switch (encoding.qualifier_index)
    {
    case 4:
      return tdep->sme_tile_slice_type_q;
    case 3:
      return tdep->sme_tile_slice_type_d;
    case 2:
      return tdep->sme_tile_slice_type_s;
    case 1:
      return tdep->sme_tile_slice_type_h;
    case 0:
      return tdep->sme_tile_slice_type_b;
    default:
      error (_("Invalid qualifier index %s for tile slice pseudo register."),
             pulongest (encoding.qualifier_index));
    }

  gdb_assert_not_reached ("Unknown qualifier for ZA tile slice register");
}

/* Return the type for a ZA tile pseudo-register based on ENCODING.  */

static struct type *
aarch64_za_tile_type (struct gdbarch *gdbarch,
                      const struct za_pseudo_encoding &encoding)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  gdb_assert (tdep->has_sme ());
  gdb_assert (tdep->sme_svq > 0);

  if (tdep->sme_tile_type_q == nullptr)
    {
      struct type *inner_vectors_type;

      /* Q tile type.  */
      inner_vectors_type
        = init_vector_type (builtin_type (gdbarch)->builtin_uint128,
                            tdep->sme_svq);
      tdep->sme_tile_type_q
        = init_vector_type (inner_vectors_type, tdep->sme_svq);

      /* D tile type.  */
      inner_vectors_type
        = init_vector_type (builtin_type (gdbarch)->builtin_uint64,
                            tdep->sme_svq * 2);
      tdep->sme_tile_type_d
        = init_vector_type (inner_vectors_type, tdep->sme_svq * 2);

      /* S tile type.  */
      inner_vectors_type
        = init_vector_type (builtin_type (gdbarch)->builtin_uint32,
                            tdep->sme_svq * 4);
      tdep->sme_tile_type_s
        = init_vector_type (inner_vectors_type, tdep->sme_svq * 4);

      /* H tile type.  */
      inner_vectors_type
        = init_vector_type (builtin_type (gdbarch)->builtin_uint16,
                            tdep->sme_svq * 8);
      tdep->sme_tile_type_h
        = init_vector_type (inner_vectors_type, tdep->sme_svq * 8);

      /* B tile type.  */
      inner_vectors_type
        = init_vector_type (builtin_type (gdbarch)->builtin_uint8,
                            tdep->sme_svq * 16);
      tdep->sme_tile_type_b
        = init_vector_type (inner_vectors_type, tdep->sme_svq * 16);
    }

  switch (encoding.qualifier_index)
    {
    case 4:
      return tdep->sme_tile_type_q;
    case 3:
      return tdep->sme_tile_type_d;
    case 2:
      return tdep->sme_tile_type_s;
    case 1:
      return tdep->sme_tile_type_h;
    case 0:
      return tdep->sme_tile_type_b;
    default:
      error (_("Invalid qualifier index %s for ZA tile pseudo register."),
             pulongest (encoding.qualifier_index));
    }

  gdb_assert_not_reached ("unknown qualifier for tile pseudo-register");
}
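
/* For example, with svq = 1 (a 16-byte streaming vector length), the S
   tile type built above is a 4x4 matrix: a vector of 4 inner vectors,
   each holding 4 uint32 elements, which matches one S-qualified ZA tile
   viewed at 32-bit granularity.  */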

/* Return the type for an AdvSIMD V register.  */

static struct type *
aarch64_vnv_type (struct gdbarch *gdbarch)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep->vnv_type == NULL)
    {
      /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single
         value slice from the non-pseudo vector registers.  However, NEON V
         registers are always vector registers, and need constructing as
         such.  */
      const struct builtin_type *bt = builtin_type (gdbarch);

      struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
                                            TYPE_CODE_UNION);

      struct type *sub
        = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);
      append_composite_type_field (sub, "f",
                                   init_vector_type (bt->builtin_double, 2));
      append_composite_type_field (sub, "u",
                                   init_vector_type (bt->builtin_uint64, 2));
      append_composite_type_field (sub, "s",
                                   init_vector_type (bt->builtin_int64, 2));
      append_composite_type_field (t, "d", sub);

      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                                 TYPE_CODE_UNION);
      append_composite_type_field (sub, "f",
                                   init_vector_type (bt->builtin_float, 4));
      append_composite_type_field (sub, "u",
                                   init_vector_type (bt->builtin_uint32, 4));
      append_composite_type_field (sub, "s",
                                   init_vector_type (bt->builtin_int32, 4));
      append_composite_type_field (t, "s", sub);

      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                                 TYPE_CODE_UNION);
      append_composite_type_field (sub, "bf",
                                   init_vector_type (bt->builtin_bfloat16, 8));
      append_composite_type_field (sub, "f",
                                   init_vector_type (bt->builtin_half, 8));
      append_composite_type_field (sub, "u",
                                   init_vector_type (bt->builtin_uint16, 8));
      append_composite_type_field (sub, "s",
                                   init_vector_type (bt->builtin_int16, 8));
      append_composite_type_field (t, "h", sub);

      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                                 TYPE_CODE_UNION);
      append_composite_type_field (sub, "u",
                                   init_vector_type (bt->builtin_uint8, 16));
      append_composite_type_field (sub, "s",
                                   init_vector_type (bt->builtin_int8, 16));
      append_composite_type_field (t, "b", sub);

      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                                 TYPE_CODE_UNION);
      append_composite_type_field (sub, "u",
                                   init_vector_type (bt->builtin_uint128, 1));
      append_composite_type_field (sub, "s",
                                   init_vector_type (bt->builtin_int128, 1));
      append_composite_type_field (t, "q", sub);

      tdep->vnv_type = t;
    }

  return tdep->vnv_type;
}

/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg == AARCH64_DWARF_PC)
    return AARCH64_PC_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  if (reg == AARCH64_DWARF_SVE_VG)
    return AARCH64_SVE_VG_REGNUM;

  if (reg == AARCH64_DWARF_SVE_FFR)
    return AARCH64_SVE_FFR_REGNUM;

  if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
    return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;

  if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
    return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;

  if (tdep->has_pauth ())
    {
      if (reg == AARCH64_DWARF_RA_SIGN_STATE)
        return tdep->ra_sign_state_regnum;
    }

  return -1;
}
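
/* In the AArch64 DWARF register numbering, x0..x30 occupy 0..30, sp is 31,
   and v0..v31 occupy 64..95; so, for instance, a DWARF register number of
   66 maps to AARCH64_V0_REGNUM + 2, i.e. v2.  */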

/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return default_print_insn (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;

/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
                              gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
                                             &fundamental_type))
    {
      int len = fundamental_type->length ();

      for (int i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          /* Enough space for a full vector register.  */
          gdb::byte_vector buf (register_size (gdbarch, regno));
          gdb_assert (len <= buf.size ());

          aarch64_debug_printf
            ("read HFA or HVA return value element %d from %s",
             i + 1, gdbarch_register_name (gdbarch, regno));

          regs->cooked_read (regno, buf);

          memcpy (valbuf, buf.data (), len);
          valbuf += len;
        }
    }
  else if (type->code () == TYPE_CODE_INT
           || type->code () == TYPE_CODE_CHAR
           || type->code () == TYPE_CODE_BOOL
           || type->code () == TYPE_CODE_PTR
           || TYPE_IS_REFERENCE (type)
           || type->code () == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
         straightforward.  Otherwise we have to play around a bit
         more.  */
      int len = type->length ();
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
        {
          /* By using store_unsigned_integer we avoid having to do
             anything special for small big-endian values.  */
          regcache_cooked_read_unsigned (regs, regno++, &tmp);
          store_unsigned_integer (valbuf,
                                  (len > X_REGISTER_SIZE
                                   ? X_REGISTER_SIZE : len), byte_order, tmp);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = type->length ();
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
        {
          regs->cooked_read (regno++, buf);
          memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}

/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);
  int elements;
  struct type *fundamental_type;

  if (TYPE_HAS_DYNAMIC_LENGTH (type))
    return 1;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
                                             &fundamental_type))
    {
      /* v0-v7 are used to return values and one register is allocated
         for one member.  However, an HFA or HVA has at most four
         members.  */
      return 0;
    }

  if (type->length () > 16
      || !language_pass_by_reference (type).trivially_copyable)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
         invisible reference.  */

      return 1;
    }

  return 0;
}
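
/* For instance, a 24-byte plain struct exceeds the 16-byte limit and is
   returned via the x8 memory block (result 1), while a struct of four
   doubles is an HFA and comes back in v0-v3 (result 0), even though it
   is 32 bytes.  */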

/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
                            const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
                                             &fundamental_type))
    {
      int len = fundamental_type->length ();

      for (int i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          /* Enough space for a full vector register.  */
          gdb::byte_vector tmpbuf (register_size (gdbarch, regno));
          gdb_assert (len <= tmpbuf.size ());

          aarch64_debug_printf
            ("write HFA or HVA return value element %d to %s",
             i + 1, gdbarch_register_name (gdbarch, regno));

          /* Depending on whether the target supports SVE or not, the V
             registers may report a size > 16 bytes.  In that case, read the
             original contents of the register before overriding it with a
             new value that has a potential size <= 16 bytes.  */
          regs->cooked_read (regno, tmpbuf);
          memcpy (tmpbuf.data (), valbuf,
                  len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
          regs->cooked_write (regno, tmpbuf);
          valbuf += len;
        }
    }
  else if (type->code () == TYPE_CODE_INT
           || type->code () == TYPE_CODE_CHAR
           || type->code () == TYPE_CODE_BOOL
           || type->code () == TYPE_CODE_PTR
           || TYPE_IS_REFERENCE (type)
           || type->code () == TYPE_CODE_ENUM)
    {
      if (type->length () <= X_REGISTER_SIZE)
        {
          /* Values of one word or less are zero/sign-extended and
             returned in x0.  */
          bfd_byte tmpbuf[X_REGISTER_SIZE];
          LONGEST val = unpack_long (type, valbuf);

          store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
          regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
        }
      else
        {
          /* Integral values greater than one word are stored in
             consecutive registers starting with x0.  This will always
             be a multiple of the register size.  */
          int len = type->length ();
          int regno = AARCH64_X0_REGNUM;

          while (len > 0)
            {
              regs->cooked_write (regno++, valbuf);
              len -= X_REGISTER_SIZE;
              valbuf += X_REGISTER_SIZE;
            }
        }
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = type->length ();
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
        {
          memcpy (tmpbuf, valbuf,
                  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          regs->cooked_write (regno++, tmpbuf);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}

/* Implement the "return_value" gdbarch method.  */

static enum return_value_convention
aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
                      struct type *valtype, struct regcache *regcache,
                      struct value **read_value, const gdb_byte *writebuf)
{
  if (valtype->code () == TYPE_CODE_STRUCT
      || valtype->code () == TYPE_CODE_UNION
      || valtype->code () == TYPE_CODE_ARRAY)
    {
      if (aarch64_return_in_memory (gdbarch, valtype))
        {
          /* From the AAPCS64's Result Return section:

             "Otherwise, the caller shall reserve a block of memory of
             sufficient size and alignment to hold the result.  The address
             of the memory block shall be passed as an additional argument
             to the function in x8."  */

          aarch64_debug_printf ("return value in memory");

          if (read_value != nullptr)
            {
              CORE_ADDR addr;

              regcache->cooked_read (AARCH64_STRUCT_RETURN_REGNUM, &addr);
              *read_value = value_at_non_lval (valtype, addr);
            }

          return RETURN_VALUE_ABI_RETURNS_ADDRESS;
        }
    }

  if (writebuf)
    aarch64_store_return_value (valtype, regcache, writebuf);

  if (read_value)
    {
      *read_value = value::allocate (valtype);
      aarch64_extract_return_value (valtype, regcache,
                                    (*read_value)->contents_raw ().data ());
    }

  aarch64_debug_printf ("return value in registers");

  return RETURN_VALUE_REGISTER_CONVENTION;
}

/* Implement the "get_longjmp_target" gdbarch method.  */

static int
aarch64_get_longjmp_target (const frame_info_ptr &frame, CORE_ADDR *pc)
{
  CORE_ADDR jb_addr;
  gdb_byte buf[X_REGISTER_SIZE];
  struct gdbarch *gdbarch = get_frame_arch (frame);
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);

  if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
                          X_REGISTER_SIZE))
    return 0;

  *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
  return 1;
}

/* Implement the "gen_return_address" gdbarch method.  */

static void
aarch64_gen_return_address (struct gdbarch *gdbarch,
                            struct agent_expr *ax, struct axs_value *value,
                            CORE_ADDR scope)
{
  value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
  value->kind = axs_lvalue_register;
  value->u.reg = AARCH64_LR_REGNUM;
}

/* Return TRUE if REGNUM is a W pseudo-register number.  Return FALSE
   otherwise.  */

static bool
is_w_pseudo_register (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep->w_pseudo_base <= regnum
      && regnum < tdep->w_pseudo_base + tdep->w_pseudo_count)
    return true;

  return false;
}

/* Return TRUE if REGNUM is a SME pseudo-register number.  Return FALSE
   otherwise.  */

static bool
is_sme_pseudo_register (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep->has_sme () && tdep->sme_pseudo_base <= regnum
      && regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count)
    return true;

  return false;
}

/* Convert ENCODING into a ZA tile slice name.  */

static const std::string
aarch64_za_tile_slice_name (const struct za_pseudo_encoding &encoding)
{
  gdb_assert (encoding.qualifier_index >= 0);
  gdb_assert (encoding.qualifier_index <= 4);
  gdb_assert (encoding.tile_index >= 0);
  gdb_assert (encoding.tile_index <= 15);
  gdb_assert (encoding.slice_index >= 0);
  gdb_assert (encoding.slice_index <= 255);

  const char orientation = encoding.horizontal ? 'h' : 'v';

  const char qualifiers[6] = "bhsdq";
  const char qualifier = qualifiers[encoding.qualifier_index];
  return string_printf ("za%d%c%c%d", encoding.tile_index, orientation,
                        qualifier, encoding.slice_index);
}
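
/* E.g. an encoding of qualifier index 3 (D), tile 2, horizontal, slice 5
   produces the name "za2hd5", while the same tile viewed whole (see
   aarch64_za_tile_name below) is just "za2d".  */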

/* Convert ENCODING into a ZA tile name.  */

static const std::string
aarch64_za_tile_name (const struct za_pseudo_encoding &encoding)
{
  /* Tiles don't use the slice number and the direction fields.  */
  gdb_assert (encoding.qualifier_index >= 0);
  gdb_assert (encoding.qualifier_index <= 4);
  gdb_assert (encoding.tile_index >= 0);
  gdb_assert (encoding.tile_index <= 15);

  const char qualifiers[6] = "bhsdq";
  const char qualifier = qualifiers[encoding.qualifier_index];
  return string_printf ("za%d%c", encoding.tile_index, qualifier);
}

/* Given an SME pseudo-register REGNUM, return its type.  */

static struct type *
aarch64_sme_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
{
  struct za_pseudo_encoding encoding;

  /* Decode the SME pseudo-register number.  */
  aarch64_za_decode_pseudos (gdbarch, regnum, encoding);

  if (is_sme_tile_slice_pseudo_register (gdbarch, regnum))
    return aarch64_za_tile_slice_type (gdbarch, encoding);
  else
    return aarch64_za_tile_type (gdbarch, encoding);
}

/* Return the pseudo register name corresponding to register regnum.  */

static const char *
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* W pseudo-registers.  Bottom halves of the X registers.  */
  static const char *const w_name[] =
    {
      "w0", "w1", "w2", "w3",
      "w4", "w5", "w6", "w7",
      "w8", "w9", "w10", "w11",
      "w12", "w13", "w14", "w15",
      "w16", "w17", "w18", "w19",
      "w20", "w21", "w22", "w23",
      "w24", "w25", "w26", "w27",
      "w28", "w29", "w30",
    };

  static const char *const q_name[] =
    {
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",
    };

  static const char *const d_name[] =
    {
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",
    };

  static const char *const s_name[] =
    {
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",
    };

  static const char *const h_name[] =
    {
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",
    };

  static const char *const b_name[] =
    {
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",
    };

  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[p_regnum - AARCH64_Q0_REGNUM];

  if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return d_name[p_regnum - AARCH64_D0_REGNUM];

  if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return s_name[p_regnum - AARCH64_S0_REGNUM];

  if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return h_name[p_regnum - AARCH64_H0_REGNUM];

  if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return b_name[p_regnum - AARCH64_B0_REGNUM];

  /* W pseudo-registers?  */
  if (is_w_pseudo_register (gdbarch, regnum))
    return w_name[regnum - tdep->w_pseudo_base];

  if (tdep->has_sve ())
    {
      static const char *const sve_v_name[] =
        {
          "v0", "v1", "v2", "v3",
          "v4", "v5", "v6", "v7",
          "v8", "v9", "v10", "v11",
          "v12", "v13", "v14", "v15",
          "v16", "v17", "v18", "v19",
          "v20", "v21", "v22", "v23",
          "v24", "v25", "v26", "v27",
          "v28", "v29", "v30", "v31",
        };

      if (p_regnum >= AARCH64_SVE_V0_REGNUM
          && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
        return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
    }

  if (is_sme_pseudo_register (gdbarch, regnum))
    return tdep->sme_pseudo_names[regnum - tdep->sme_pseudo_base].c_str ();

  /* RA_STATE is used for unwinding only.  Do not assign it a name - this
     prevents it from being read by methods such as
     mi_cmd_trace_frame_collected.  */
  if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
    return "";

  internal_error (_("aarch64_pseudo_register_name: bad register number %d"),
                  p_regnum);
}

/* Implement the "pseudo_register_type" gdbarch method.  */

static struct type *
aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_vnq_type (gdbarch);

  if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_vnd_type (gdbarch);

  if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_vns_type (gdbarch);

  if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_vnh_type (gdbarch);

  if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_vnb_type (gdbarch);

  if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
      && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
    return aarch64_vnv_type (gdbarch);

  /* W pseudo-registers are 32-bit.  */
  if (is_w_pseudo_register (gdbarch, regnum))
    return builtin_type (gdbarch)->builtin_uint32;

  if (is_sme_pseudo_register (gdbarch, regnum))
    return aarch64_sme_pseudo_register_type (gdbarch, regnum);

  if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
    return builtin_type (gdbarch)->builtin_uint64;

  internal_error (_("aarch64_pseudo_register_type: bad register number %d"),
                  p_regnum);
}

/* Implement the "pseudo_register_reggroup_p" gdbarch method.  */

static int
aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
                                    const struct reggroup *group)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return (group == all_reggroup || group == vector_reggroup
            || group == float_reggroup);
  else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return (group == all_reggroup || group == vector_reggroup
            || group == float_reggroup);
  else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
           && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
    return group == all_reggroup || group == vector_reggroup;
  else if (is_sme_pseudo_register (gdbarch, regnum))
    return group == all_reggroup || group == vector_reggroup;
  /* RA_STATE is used for unwinding only.  Do not assign it to any groups.  */
  if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
    return 0;

  return group == all_reggroup;
}

/* Helper for aarch64_pseudo_read_value.  */

static value *
aarch64_pseudo_read_value_1 (const frame_info_ptr &next_frame,
                             const int pseudo_reg_num, int raw_regnum_offset)
{
  unsigned v_regnum = AARCH64_V0_REGNUM + raw_regnum_offset;

  return pseudo_from_raw_part (next_frame, pseudo_reg_num, v_regnum, 0);
}

/* Helper function for reading/writing ZA pseudo-registers.  Given REGNUM,
   a ZA pseudo-register number, return the information on positioning of
   the bytes that must be read from/written to.  */

static za_offsets
aarch64_za_offsets_from_regnum (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  gdb_assert (tdep->has_sme ());
  gdb_assert (tdep->sme_svq > 0);
  gdb_assert (tdep->sme_pseudo_base <= regnum);
  gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);

  struct za_pseudo_encoding encoding;

  /* Decode the ZA pseudo-register number.  */
  aarch64_za_decode_pseudos (gdbarch, regnum, encoding);

  /* Fetch the streaming vector length.  */
  size_t svl = sve_vl_from_vq (tdep->sme_svq);
  za_offsets offsets;

  if (is_sme_tile_slice_pseudo_register (gdbarch, regnum))
    {
      if (encoding.horizontal)
        {
          /* Horizontal tile slices are contiguous ranges of svl bytes.  */

          /* The starting offset depends on the tile index (to locate the
             tile in the ZA buffer), the slice index (to locate the slice
             within the tile) and the qualifier.  */
          offsets.starting_offset
            = (encoding.tile_index * svl
               + encoding.slice_index * (svl >> encoding.qualifier_index));
          /* Horizontal tile slice data is contiguous and thus doesn't have
             a stride.  */
          offsets.stride_size = 0;
          /* Horizontal tile slice data is contiguous and thus only has 1
             chunk.  */
          offsets.chunks = 1;
          /* The chunk size is always svl bytes.  */
          offsets.chunk_size = svl;
        }
      else
        {
          /* Vertical tile slices are non-contiguous ranges of
             (1 << qualifier_index) bytes.  */

          /* The starting offset depends on the tile number (to locate the
             tile in the ZA buffer), the slice index (to locate the element
             within the tile slice) and the qualifier.  */
          offsets.starting_offset
            = (encoding.tile_index * svl
               + encoding.slice_index * (1 << encoding.qualifier_index));
          /* The offset between vertical tile slices depends on the
             qualifier and svl.  */
          offsets.stride_size = svl << encoding.qualifier_index;
          /* The number of chunks depends on svl and the qualifier size.  */
          offsets.chunks = svl >> encoding.qualifier_index;
          /* The chunk size depends on the qualifier.  */
          offsets.chunk_size = 1 << encoding.qualifier_index;
        }
    }
  else
    {
      /* ZA tile pseudo-register.  */

      /* Starting offset depends on the tile index and qualifier.  */
      offsets.starting_offset = encoding.tile_index * svl;
      /* The offset between tile slices depends on the qualifier and svl.  */
      offsets.stride_size = svl << encoding.qualifier_index;
      /* The number of chunks depends on the qualifier and svl.  */
      offsets.chunks = svl >> encoding.qualifier_index;
      /* The chunk size is always svl bytes.  */
      offsets.chunk_size = svl;
    }

  return offsets;
}
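
/* Worked example, assuming svq = 2 (svl = 32 bytes): a vertical D tile
   slice (qualifier 3) in tile T at slice S starts at T * 32 + S * 8 and
   is read as 32 >> 3 = 4 chunks of 8 bytes, each 32 << 3 = 256 bytes
   apart in the ZA buffer; the corresponding horizontal slice is a single
   contiguous 32-byte chunk.  */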

/* Given PSEUDO_REG_NUM, an SME pseudo-register number, return its value.  */

static value *
aarch64_sme_pseudo_register_read (gdbarch *gdbarch,
                                  const frame_info_ptr &next_frame,
                                  const int pseudo_reg_num)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  gdb_assert (tdep->has_sme ());
  gdb_assert (tdep->sme_svq > 0);
  gdb_assert (tdep->sme_pseudo_base <= pseudo_reg_num);
  gdb_assert (pseudo_reg_num < tdep->sme_pseudo_base + tdep->sme_pseudo_count);

  /* Fetch the offsets that we need in order to read from the correct blocks
     of ZA.  */
  za_offsets offsets
    = aarch64_za_offsets_from_regnum (gdbarch, pseudo_reg_num);

  /* Fetch the contents of ZA.  */
  value *za_value = value_of_register (tdep->sme_za_regnum, next_frame);
  value *result = value::allocate_register (next_frame, pseudo_reg_num);

  /* Copy the requested data.  */
  for (int chunks = 0; chunks < offsets.chunks; chunks++)
    {
      int src_offset = offsets.starting_offset + chunks * offsets.stride_size;
      int dst_offset = chunks * offsets.chunk_size;
      za_value->contents_copy (result, dst_offset, src_offset,
                               offsets.chunk_size);
    }

  return result;
}

/* Implement the "pseudo_register_read_value" gdbarch method.  */

static value *
aarch64_pseudo_read_value (gdbarch *gdbarch, const frame_info_ptr &next_frame,
                           const int pseudo_reg_num)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (is_w_pseudo_register (gdbarch, pseudo_reg_num))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      /* Default offset for little endian.  */
      int offset = 0;

      if (byte_order == BFD_ENDIAN_BIG)
        offset = 4;

      /* Find the correct X register to extract the data from.  */
      int x_regnum
        = AARCH64_X0_REGNUM + (pseudo_reg_num - tdep->w_pseudo_base);

      /* Read the bottom 4 bytes of X.  */
      return pseudo_from_raw_part (next_frame, pseudo_reg_num, x_regnum,
                                   offset);
    }
  else if (is_sme_pseudo_register (gdbarch, pseudo_reg_num))
    return aarch64_sme_pseudo_register_read (gdbarch, next_frame,
                                             pseudo_reg_num);

  /* Offset in the "pseudo-register space".  */
  int pseudo_offset = pseudo_reg_num - gdbarch_num_regs (gdbarch);

  if (pseudo_offset >= AARCH64_Q0_REGNUM
      && pseudo_offset < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
                                        pseudo_offset - AARCH64_Q0_REGNUM);

  if (pseudo_offset >= AARCH64_D0_REGNUM
      && pseudo_offset < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
                                        pseudo_offset - AARCH64_D0_REGNUM);

  if (pseudo_offset >= AARCH64_S0_REGNUM
      && pseudo_offset < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
                                        pseudo_offset - AARCH64_S0_REGNUM);

  if (pseudo_offset >= AARCH64_H0_REGNUM
      && pseudo_offset < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
                                        pseudo_offset - AARCH64_H0_REGNUM);

  if (pseudo_offset >= AARCH64_B0_REGNUM
      && pseudo_offset < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
                                        pseudo_offset - AARCH64_B0_REGNUM);

  if (tdep->has_sve () && pseudo_offset >= AARCH64_SVE_V0_REGNUM
      && pseudo_offset < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
                                        pseudo_offset - AARCH64_SVE_V0_REGNUM);

  gdb_assert_not_reached ("regnum out of bounds");
}

/* Helper for aarch64_pseudo_write.  */

static void
aarch64_pseudo_write_1 (gdbarch *gdbarch, const frame_info_ptr &next_frame,
                        int regnum_offset,
                        gdb::array_view<const gdb_byte> buf)
{
  unsigned raw_regnum = AARCH64_V0_REGNUM + regnum_offset;

  /* Enough space for a full vector register.

     Ensure the register buffer is zero: we want GDB writes of the
     various 'scalar' pseudo-registers to behave like architectural
     writes, where register-width bytes are written and the remainder
     is set to zero.  */
  gdb::byte_vector raw_buf (register_size (gdbarch, raw_regnum), 0);
  static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);

  gdb::array_view<gdb_byte> raw_view (raw_buf);
  copy (buf, raw_view.slice (0, buf.size ()));
  put_frame_register (next_frame, raw_regnum, raw_view);
}
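
/* So a write to the S3 pseudo-register, for example, stores its 4 bytes
   at offset 0 of the raw V3 (or Z3) register and clears the remaining
   bytes of the buffer, mirroring the architectural behaviour where a
   scalar write to S3 zeroes the upper bits of the vector register.  */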

/* Given REGNUM, a SME pseudo-register number, store the bytes from DATA
   to the pseudo-register.  */

static void
aarch64_sme_pseudo_register_write (gdbarch *gdbarch,
                                   const frame_info_ptr &next_frame,
                                   const int regnum,
                                   gdb::array_view<const gdb_byte> data)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  gdb_assert (tdep->has_sme ());
  gdb_assert (tdep->sme_svq > 0);
  gdb_assert (tdep->sme_pseudo_base <= regnum);
  gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);

  /* Fetch the offsets that we need in order to write to the correct blocks
     of ZA.  */
  za_offsets offsets = aarch64_za_offsets_from_regnum (gdbarch, regnum);

  /* Fetch the contents of ZA.  */
  value *za_value = value_of_register (tdep->sme_za_regnum, next_frame);

  {
    /* Create a view only on the portion of za we want to write.  */
    gdb::array_view<gdb_byte> za_view
      = za_value->contents_writeable ().slice (offsets.starting_offset);

    /* Copy the requested data.  */
    for (int chunks = 0; chunks < offsets.chunks; chunks++)
      {
        gdb::array_view<const gdb_byte> src
          = data.slice (chunks * offsets.chunk_size, offsets.chunk_size);
        gdb::array_view<gdb_byte> dst
          = za_view.slice (chunks * offsets.stride_size, offsets.chunk_size);
        copy (src, dst);
      }
  }

  /* Write back to ZA.  */
  put_frame_register (next_frame, tdep->sme_za_regnum,
                      za_value->contents_raw ());
}

/* Implement the "pseudo_register_write" gdbarch method.  */

static void
aarch64_pseudo_write (gdbarch *gdbarch, const frame_info_ptr &next_frame,
                      const int pseudo_reg_num,
                      gdb::array_view<const gdb_byte> buf)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (is_w_pseudo_register (gdbarch, pseudo_reg_num))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      /* Default offset for little endian.  */
      int offset = 0;

      if (byte_order == BFD_ENDIAN_BIG)
        offset = 4;

      /* Find the correct X register to write the data to.  */
      int x_regnum
        = AARCH64_X0_REGNUM + (pseudo_reg_num - tdep->w_pseudo_base);

      /* First zero-out the contents of X.  */
      gdb_byte bytes[8] {};
      gdb::array_view<gdb_byte> bytes_view (bytes);
      copy (buf, bytes_view.slice (offset, 4));

      /* Write to the bottom 4 bytes of X.  */
      put_frame_register (next_frame, x_regnum, bytes_view);
      return;
    }
  else if (is_sme_pseudo_register (gdbarch, pseudo_reg_num))
    {
      aarch64_sme_pseudo_register_write (gdbarch, next_frame, pseudo_reg_num,
                                         buf);
      return;
    }

  /* Offset in the "pseudo-register space".  */
  int pseudo_offset = pseudo_reg_num - gdbarch_num_regs (gdbarch);

  if (pseudo_offset >= AARCH64_Q0_REGNUM
      && pseudo_offset < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, next_frame,
                                   pseudo_offset - AARCH64_Q0_REGNUM, buf);

  if (pseudo_offset >= AARCH64_D0_REGNUM
      && pseudo_offset < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, next_frame,
                                   pseudo_offset - AARCH64_D0_REGNUM, buf);

  if (pseudo_offset >= AARCH64_S0_REGNUM
      && pseudo_offset < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, next_frame,
                                   pseudo_offset - AARCH64_S0_REGNUM, buf);

  if (pseudo_offset >= AARCH64_H0_REGNUM
      && pseudo_offset < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, next_frame,
                                   pseudo_offset - AARCH64_H0_REGNUM, buf);

  if (pseudo_offset >= AARCH64_B0_REGNUM
      && pseudo_offset < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, next_frame,
                                   pseudo_offset - AARCH64_B0_REGNUM, buf);

  if (tdep->has_sve () && pseudo_offset >= AARCH64_SVE_V0_REGNUM
      && pseudo_offset < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, next_frame,
                                   pseudo_offset - AARCH64_SVE_V0_REGNUM, buf);

  gdb_assert_not_reached ("regnum out of bounds");
}

/* Callback function for user_reg_add.  */

static struct value *
value_of_aarch64_user_reg (const frame_info_ptr &frame, const void *baton)
{
  const int *reg_p = (const int *) baton;

  return value_of_register (*reg_p, get_next_frame_sentinel_okay (frame));
}

/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.  */

static std::vector<CORE_ADDR>
aarch64_software_single_step (struct regcache *regcache)
{
  struct gdbarch *gdbarch = regcache->arch ();
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = regcache_read_pc (regcache);
  CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;

  ULONGEST insn_from_memory;
  if (!safe_read_memory_unsigned_integer (loc, insn_size,
                                          byte_order_for_code,
                                          &insn_from_memory))
    {
      /* Assume we don't have an atomic sequence, as we couldn't read the
         instruction at this location.  */
      return {};
    }

  uint32_t insn = insn_from_memory;
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return {};

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
    return {};

  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      loc += insn_size;

      if (!safe_read_memory_unsigned_integer (loc, insn_size,
                                              byte_order_for_code,
                                              &insn_from_memory))
        {
          /* Assume we don't have an atomic sequence, as we couldn't read
             the instruction at this location.  */
          return {};
        }

      insn = insn_from_memory;
      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        return {};
      /* Check if the instruction is a conditional branch.  */
      if (inst.opcode->iclass == condbranch)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);

          if (bc_insn_count >= 1)
            return {};

          /* It is, so we'll try to set a breakpoint at the destination.  */
          breaks[1] = loc + inst.operands[0].imm.value;

          bc_insn_count++;
          last_breakpoint++;
        }

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
        {
          closing_insn = loc;
          break;
        }
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return {};

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
          || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  std::vector<CORE_ADDR> next_pcs;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    next_pcs.push_back (breaks[index]);

  return next_pcs;
}
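
/* A typical sequence this handles is the expansion of a compare-and-swap,
   something like:

     retry:
       ldaxr  w1, [x0]      ; load exclusive - sequence entry
       cmp    w1, w2
       b.ne   out           ; conditional branch - gets breaks[1]
       stlxr  w3, w4, [x0]  ; store exclusive - sequence exit
       cbnz   w3, retry
     out:

   Breakpoints land after the closing store exclusive (and at the branch
   destination when it is outside the sequence), never inside it, since
   trapping between the exclusive pair can clear the exclusive monitor and
   keep the sequence from ever succeeding.  */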

struct aarch64_displaced_step_copy_insn_closure
  : public displaced_step_copy_insn_closure
{
  /* True when a conditional instruction, such as B.COND or TBZ, is being
     displaced stepped.  */
  bool cond = false;

  /* PC adjustment offset after displaced stepping.  If 0, then we don't
     write the PC back, assuming the PC is already the right address.  */
  int32_t pc_adjust = 0;
};

/* Data when visiting instructions for displaced stepping.  */

struct aarch64_displaced_step_data
{
  struct aarch64_insn_data base;

  /* The address where the instruction will be executed.  */
  CORE_ADDR new_addr;
  /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
  uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
  /* Number of instructions in INSN_BUF.  */
  unsigned insn_count;
  /* Registers when doing displaced stepping.  */
  struct regcache *regs;

  aarch64_displaced_step_copy_insn_closure *dsc;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_displaced_step_b (const int is_bl, const int32_t offset,
                          struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  int64_t new_offset = data->insn_addr - dsd->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    {
      /* Emit B rather than BL, because executing BL on a new address
         will get the wrong address into LR.  In order to avoid this,
         we emit B, and update LR if the instruction is BL.  */
      emit_b (dsd->insn_buf, 0, new_offset);
      dsd->insn_count++;
    }
  else
    {
      /* Write NOP.  */
      emit_nop (dsd->insn_buf);
      dsd->insn_count++;
      dsd->dsc->pc_adjust = offset;
    }

  if (is_bl)
    {
      /* Update LR.  */
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
                                      data->insn_addr + 4);
    }
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
                               struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* GDB has to fix up PC after displaced stepping this instruction
     differently according to whether the condition is true or false.
     Instead of checking COND against the condition flags, we can use
     the following instructions, and GDB can tell how to fix up PC
     according to the PC value.

     B.COND TAKEN    ; If cond is true, then jump to TAKEN.
     INSN1           ;
     TAKEN:
     INSN2
  */

  emit_bcond (dsd->insn_buf, cond, 8);
  dsd->dsc->cond = true;
  dsd->dsc->pc_adjust = offset;
  dsd->insn_count = 1;
}

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
                           const unsigned rn, int is64,
                           struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a compare and branch
     instruction.  We can use the following instructions instead:

     CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
     INSN1           ;
     TAKEN:
     INSN2
  */
  emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
  dsd->insn_count = 1;
  dsd->dsc->cond = true;
  dsd->dsc->pc_adjust = offset;
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
                           const unsigned rt, unsigned bit,
                           struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a test bit and branch
     instruction.  We can use the following instructions instead:

     TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
     INSN1           ;
     TAKEN:
     INSN2
  */
  emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
  dsd->insn_count = 1;
  dsd->dsc->cond = true;
  dsd->dsc->pc_adjust = offset;
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
                            const int is_adrp, struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
                                      address & ~0xfff);
    }
  else
    regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
                                    address);

  dsd->dsc->pc_adjust = 4;
  emit_nop (dsd->insn_buf);
  dsd->insn_count = 1;
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
                                    const unsigned rt, const int is64,
                                    struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  CORE_ADDR address = data->insn_addr + offset;
  struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };

  regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
                                  address);

  if (is_sw)
    dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
                                  aarch64_register (rt, 1), zero);
  else
    dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
                                aarch64_register (rt, 1), zero);

  dsd->dsc->pc_adjust = 4;
}
3749 /* Implementation of aarch64_insn_visitor method "others". */
3751 static void
3752 aarch64_displaced_step_others (const uint32_t insn,
3753 struct aarch64_insn_data *data)
3755 struct aarch64_displaced_step_data *dsd
3756 = (struct aarch64_displaced_step_data *) data;
3758 uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
3759 if (masked_insn == BLR)
3761       /* Emit a BR to the same register, then update LR with the return
3762	  address of the original BLR (similar to aarch64_displaced_step_b).  */
3763 aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
3764 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3765 data->insn_addr + 4);
3767 else
3768 aarch64_emit_insn (dsd->insn_buf, insn);
3769 dsd->insn_count = 1;
3771 if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
3772 dsd->dsc->pc_adjust = 0;
3773 else
3774 dsd->dsc->pc_adjust = 4;
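/* Encoding note: BR Xn is 0xd61f0000 | (n << 5) and BLR Xn is
   0xd63f0000 | (n << 5), differing only in bit 21, so the AND with
   0xffdfffff above rewrites e.g. "BLR x5" (0xd63f00a0) into "BR x5"
   (0xd61f00a0) while LR is updated by hand.  */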
3777 static const struct aarch64_insn_visitor visitor =
3779 aarch64_displaced_step_b,
3780 aarch64_displaced_step_b_cond,
3781 aarch64_displaced_step_cb,
3782 aarch64_displaced_step_tb,
3783 aarch64_displaced_step_adr,
3784 aarch64_displaced_step_ldr_literal,
3785 aarch64_displaced_step_others,
3788 /* Implement the "displaced_step_copy_insn" gdbarch method. */
3790 displaced_step_copy_insn_closure_up
3791 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
3792 CORE_ADDR from, CORE_ADDR to,
3793 struct regcache *regs)
3795 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3796 struct aarch64_displaced_step_data dsd;
3797 aarch64_inst inst;
3798 ULONGEST insn_from_memory;
3800 if (!safe_read_memory_unsigned_integer (from, 4, byte_order_for_code,
3801 &insn_from_memory))
3802 return nullptr;
3804 uint32_t insn = insn_from_memory;
3806 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3807 return NULL;
3809 /* Look for a Load Exclusive instruction which begins the sequence,
3810 or for a MOPS instruction. */
3811 if ((inst.opcode->iclass == ldstexcl && bit (insn, 22))
3812 || AARCH64_CPU_HAS_FEATURE (*inst.opcode->avariant, MOPS))
3814       /* We can't displaced-step atomic sequences or MOPS instructions.  */
3815 return NULL;
3818 std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
3819 (new aarch64_displaced_step_copy_insn_closure);
3820 dsd.base.insn_addr = from;
3821 dsd.new_addr = to;
3822 dsd.regs = regs;
3823 dsd.dsc = dsc.get ();
3824 dsd.insn_count = 0;
3825 aarch64_relocate_instruction (insn, &visitor,
3826 (struct aarch64_insn_data *) &dsd);
3827 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
3829 if (dsd.insn_count != 0)
3831 int i;
3833 /* Instruction can be relocated to scratch pad. Copy
3834 relocated instruction(s) there. */
3835 for (i = 0; i < dsd.insn_count; i++)
3837 displaced_debug_printf ("writing insn %.8x at %s",
3838 dsd.insn_buf[i],
3839 paddress (gdbarch, to + i * 4));
3841 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3842 (ULONGEST) dsd.insn_buf[i]);
3845 else
3847 dsc = NULL;
3850   /* This is a workaround for a problem with g++ 4.8.  */
3851 return displaced_step_copy_insn_closure_up (dsc.release ());
3854 /* Implement the "displaced_step_fixup" gdbarch method. */
3856 void
3857 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3858 struct displaced_step_copy_insn_closure *dsc_,
3859 CORE_ADDR from, CORE_ADDR to,
3860 struct regcache *regs, bool completed_p)
3862 CORE_ADDR pc = regcache_read_pc (regs);
3864   /* If the displaced instruction didn't complete successfully, then all
3865      we need to do is restore the program counter.  */
3866 if (!completed_p)
3868 pc = from + (pc - to);
3869 regcache_write_pc (regs, pc);
3870 return;
3873 aarch64_displaced_step_copy_insn_closure *dsc
3874 = (aarch64_displaced_step_copy_insn_closure *) dsc_;
3876 displaced_debug_printf ("PC after stepping: %s (was %s).",
3877 paddress (gdbarch, pc), paddress (gdbarch, to));
3879 if (dsc->cond)
3881 displaced_debug_printf ("[Conditional] pc_adjust before: %d",
3882 dsc->pc_adjust);
3884 if (pc - to == 8)
3886 /* Condition is true. */
3888 else if (pc - to == 4)
3890 /* Condition is false. */
3891 dsc->pc_adjust = 4;
3893 else
3894 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3896 displaced_debug_printf ("[Conditional] pc_adjust after: %d",
3897 dsc->pc_adjust);
3900 displaced_debug_printf ("%s PC by %d",
3901 dsc->pc_adjust ? "adjusting" : "not adjusting",
3902 dsc->pc_adjust);
3904 if (dsc->pc_adjust != 0)
3906 /* Make sure the previous instruction was executed (that is, the PC
3907 has changed). If the PC didn't change, then discard the adjustment
3908 offset. Otherwise we may skip an instruction before its execution
3909 took place. */
3910 if ((pc - to) == 0)
3912 displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
3913 dsc->pc_adjust = 0;
3916 displaced_debug_printf ("fixup: set PC to %s:%d",
3917 paddress (gdbarch, from), dsc->pc_adjust);
3919 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3920 from + dsc->pc_adjust);
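/* Worked example: for a relocated "B.NE .+0x40", PC_ADJUST starts out as
   0x40.  A taken branch stops at TO + 8, PC_ADJUST is kept and the final
   PC is FROM + 0x40; a non-taken branch stops at TO + 4, PC_ADJUST
   becomes 4 and the final PC is FROM + 4, the next sequential
   instruction.  */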
3924 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3926 bool
3927 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
3929 return true;
3932 /* Get the correct target description for the given FEATURES set.
3933    If the VQ field in FEATURES is zero then it is assumed SVE is not
3934    supported.  (It is not possible to set VQ to zero on an SVE system.)
3936    The MTE field indicates the presence of the Memory Tagging Extension
3938    feature; the TLS field, the presence of Thread Local Storage.  */
3940 const target_desc *
3941 aarch64_read_description (const aarch64_features &features)
3943 if (features.vq > AARCH64_MAX_SVE_VQ)
3944 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), features.vq,
3945 AARCH64_MAX_SVE_VQ);
3947 struct target_desc *tdesc = tdesc_aarch64_map[features];
3949 if (tdesc == NULL)
3951 tdesc = aarch64_create_target_description (features);
3952 tdesc_aarch64_map[features] = tdesc;
3955 return tdesc;
3958 /* Return the VQ used when creating the target description TDESC. */
3960 static uint64_t
3961 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3963 const struct tdesc_feature *feature_sve;
3965 if (!tdesc_has_registers (tdesc))
3966 return 0;
3968 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3970 if (feature_sve == nullptr)
3971 return 0;
3973 uint64_t vl = tdesc_register_bitsize (feature_sve,
3974 aarch64_sve_register_names[0]) / 8;
3975 return sve_vq_from_vl (vl);
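/* For instance, a description whose z0 is 2048 bits wide yields
   vl == 256 bytes and hence vq == 16, while a 512-bit SVE implementation
   yields vl == 64 and vq == 4 (vq counts 128-bit quadwords per
   vector).  */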
3979 /* Return the svq (streaming vector quotient) used when creating the target
3980 description TDESC. */
3982 static uint64_t
3983 aarch64_get_tdesc_svq (const struct target_desc *tdesc)
3985 const struct tdesc_feature *feature_sme;
3987 if (!tdesc_has_registers (tdesc))
3988 return 0;
3990 feature_sme = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme");
3992 if (feature_sme == nullptr)
3993 return 0;
3995 size_t svl_squared = tdesc_register_bitsize (feature_sme, "za");
3997 /* We have the total size of the ZA matrix, in bits. Figure out the svl
3998 value. */
3999 size_t svl = std::sqrt (svl_squared / 8);
4001 /* Now extract svq. */
4002 return sve_vq_from_vl (svl);
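/* Worked example: with a streaming vector length of 32 bytes the ZA
   matrix is 32 x 32 bytes, so "za" is 8192 bits wide;
   sqrt (8192 / 8) == 32 recovers svl, and sve_vq_from_vl (32) yields
   svq == 2.  */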
4005 /* Get the AArch64 features present in the given target description. */
4007 aarch64_features
4008 aarch64_features_from_target_desc (const struct target_desc *tdesc)
4010 aarch64_features features;
4012 if (tdesc == nullptr)
4013 return features;
4015 features.vq = aarch64_get_tdesc_vq (tdesc);
4017   /* We need to look for a couple of pauth feature name variations.  */
4018 features.pauth
4019 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth") != nullptr);
4021 if (!features.pauth)
4022 features.pauth = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth_v2")
4023 != nullptr);
4025 features.mte
4026 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte") != nullptr);
4028 const struct tdesc_feature *tls_feature
4029 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
4031 if (tls_feature != nullptr)
4033 /* We have TLS registers. Find out how many. */
4034 if (tdesc_unnumbered_register (tls_feature, "tpidr2"))
4035 features.tls = 2;
4036 else
4037 features.tls = 1;
4040 features.svq = aarch64_get_tdesc_svq (tdesc);
4042 /* Check for the SME2 feature. */
4043 features.sme2 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme2")
4044 != nullptr);
4046 return features;
4049 /* Implement the "cannot_store_register" gdbarch method. */
4051 static int
4052 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
4054 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
4056 if (!tdep->has_pauth ())
4057 return 0;
4059 /* Pointer authentication registers are read-only. */
4060 return (regnum >= tdep->pauth_reg_base
4061 && regnum < tdep->pauth_reg_base + tdep->pauth_reg_count);
4064 /* Implement the stack_frame_destroyed_p gdbarch method. */
4066 static int
4067 aarch64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
4069 CORE_ADDR func_start, func_end;
4070 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
4071 return 0;
4073 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4075 ULONGEST insn_from_memory;
4076 if (!safe_read_memory_unsigned_integer (pc, 4, byte_order_for_code,
4077 &insn_from_memory))
4078 return 0;
4080 uint32_t insn = insn_from_memory;
4082 aarch64_inst inst;
4083 if (aarch64_decode_insn (insn, &inst, 1, nullptr) != 0)
4084 return 0;
4086 return streq (inst.opcode->name, "ret");
4089 /* Helper to get the allocation tag from a 64-bit ADDRESS.
4091 Return the allocation tag if successful and nullopt otherwise. */
4093 std::optional<CORE_ADDR>
4094 aarch64_mte_get_atag (CORE_ADDR address)
4096 gdb::byte_vector tags;
4098 /* Attempt to fetch the allocation tag. */
4099 if (!target_fetch_memtags (address, 1, tags,
4100 static_cast<int> (memtag_type::allocation)))
4101 return {};
4103 /* Only one tag should've been returned. Make sure we got exactly that. */
4104 if (tags.size () != 1)
4105 error (_("Target returned an unexpected number of tags."));
4107 /* Although our tags are 4 bits in size, they are stored in a
4108 byte. */
4109 return tags[0];
4112 /* Implement the memtag_matches_p gdbarch method. */
4114 static bool
4115 aarch64_memtag_matches_p (struct gdbarch *gdbarch,
4116 struct value *address)
4118 gdb_assert (address != nullptr);
4120 CORE_ADDR addr = value_as_address (address);
4122 /* Fetch the allocation tag for ADDRESS. */
4123 std::optional<CORE_ADDR> atag
4124 = aarch64_mte_get_atag (aarch64_remove_non_address_bits (gdbarch, addr));
4126 if (!atag.has_value ())
4127 return true;
4129 /* Fetch the logical tag for ADDRESS. */
4130 gdb_byte ltag = aarch64_mte_get_ltag (addr);
4132 /* Are the tags the same? */
4133 return ltag == *atag;
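/* For example, for the tagged pointer 0x0a00ffffdeadbee0 the logical tag
   is 0xa (bits 59:56) and the allocation tag is fetched for the 16-byte
   granule containing 0xffffdeadbee0; the access is reported as matching
   when both tags are 0xa, or when the address has no allocation tag at
   all.  */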
4136 /* Implement the set_memtags gdbarch method. */
4138 static bool
4139 aarch64_set_memtags (struct gdbarch *gdbarch, struct value *address,
4140 size_t length, const gdb::byte_vector &tags,
4141 memtag_type tag_type)
4143 gdb_assert (!tags.empty ());
4144 gdb_assert (address != nullptr);
4146 CORE_ADDR addr = value_as_address (address);
4148 /* Set the logical tag or the allocation tag. */
4149 if (tag_type == memtag_type::logical)
4151 /* When setting logical tags, we don't care about the length, since
4152 we are only setting a single logical tag. */
4153 addr = aarch64_mte_set_ltag (addr, tags[0]);
4155 /* Update the value's content with the tag. */
4156 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4157 gdb_byte *srcbuf = address->contents_raw ().data ();
4158 store_unsigned_integer (srcbuf, sizeof (addr), byte_order, addr);
4160 else
4162 /* Remove the top byte. */
4163 addr = aarch64_remove_non_address_bits (gdbarch, addr);
4165 /* With G being the number of tag granules and N the number of tags
4166 passed in, we can have the following cases:
4168 1 - G == N: Store all the N tags to memory.
4170 2 - G < N : Warn about having more tags than granules, but write G
4171 tags.
4173 3 - G > N : This is a "fill tags" operation. We should use the tags
4174 as a pattern to fill the granules repeatedly until we have
4175		     written G tags to memory.
4176      */
4178 size_t g = aarch64_mte_get_tag_granules (addr, length,
4179 AARCH64_MTE_GRANULE_SIZE);
4180 size_t n = tags.size ();
4182 if (g < n)
4183 warning (_("Got more tags than memory granules. Tags will be "
4184 "truncated."));
4185 else if (g > n)
4186 warning (_("Using tag pattern to fill memory range."));
4188 if (!target_store_memtags (addr, length, tags,
4189 static_cast<int> (memtag_type::allocation)))
4190 return false;
4192 return true;
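/* For instance, tagging LENGTH == 64 bytes covers G == 4 granules, so
   passing the two tags {0x3, 0x4} (N == 2) is the "fill" case above and
   the granules end up tagged 0x3, 0x4, 0x3, 0x4, with the repetition of
   the pattern performed by the target when it stores the tags.  */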
4195 /* Implement the get_memtag gdbarch method. */
4197 static struct value *
4198 aarch64_get_memtag (struct gdbarch *gdbarch, struct value *address,
4199 memtag_type tag_type)
4201 gdb_assert (address != nullptr);
4203 CORE_ADDR addr = value_as_address (address);
4204 CORE_ADDR tag = 0;
4206 /* Get the logical tag or the allocation tag. */
4207 if (tag_type == memtag_type::logical)
4208 tag = aarch64_mte_get_ltag (addr);
4209 else
4211 /* Remove the top byte. */
4212 addr = aarch64_remove_non_address_bits (gdbarch, addr);
4213 std::optional<CORE_ADDR> atag = aarch64_mte_get_atag (addr);
4215 if (!atag.has_value ())
4216 return nullptr;
4218 tag = *atag;
4221 /* Convert the tag to a value. */
4222 return value_from_ulongest (builtin_type (gdbarch)->builtin_unsigned_int,
4223 tag);
4226 /* Implement the memtag_to_string gdbarch method. */
4228 static std::string
4229 aarch64_memtag_to_string (struct gdbarch *gdbarch, struct value *tag_value)
4231 if (tag_value == nullptr)
4232 return "";
4234 CORE_ADDR tag = value_as_address (tag_value);
4236 return string_printf ("0x%s", phex_nz (tag, sizeof (tag)));
4239 /* See aarch64-tdep.h. */
4241 CORE_ADDR
4242 aarch64_remove_non_address_bits (struct gdbarch *gdbarch, CORE_ADDR pointer)
4244 /* By default, we assume TBI and discard the top 8 bits plus the VA range
4245 select bit (55). Below we try to fetch information about pointer
4246 authentication masks in order to make non-address removal more
4247 precise. */
4248 CORE_ADDR mask = AARCH64_TOP_BITS_MASK;
4250 /* Check if we have an inferior first. If not, just use the default
4251 mask.
4253      We use inferior_ptid here because the pointer authentication masks
4254      should be the same across all threads of a process, and GDB may have
4255      momentarily switched to no thread at all, leaving the current thread
4256      unavailable.  */
4257 if (inferior_ptid != null_ptid)
4259 /* If we do have an inferior, attempt to fetch its thread's thread_info
4260 struct. */
4261 thread_info *thread = current_inferior ()->find_thread (inferior_ptid);
4263 /* If the thread is running, we will not be able to fetch the mask
4264 registers. */
4265 if (thread != nullptr && thread->state != THREAD_RUNNING)
4267 /* Otherwise, fetch the register cache and the masks. */
4268 struct regcache *regs
4269 = get_thread_regcache (current_inferior ()->process_target (),
4270 inferior_ptid);
4272 /* Use the gdbarch from the register cache to check for pointer
4273 authentication support, as it matches the features found in
4274 that particular thread. */
4275 aarch64_gdbarch_tdep *tdep
4276 = gdbarch_tdep<aarch64_gdbarch_tdep> (regs->arch ());
4278 /* Is there pointer authentication support? */
4279 if (tdep->has_pauth ())
4281 CORE_ADDR cmask, dmask;
4282 int dmask_regnum
4283 = AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base);
4284 int cmask_regnum
4285 = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
4287 /* If we have a kernel address and we have kernel-mode address
4288 mask registers, use those instead. */
4289 if (tdep->pauth_reg_count > 2
4290 && pointer & VA_RANGE_SELECT_BIT_MASK)
4292 dmask_regnum
4293 = AARCH64_PAUTH_DMASK_HIGH_REGNUM (tdep->pauth_reg_base);
4294 cmask_regnum
4295 = AARCH64_PAUTH_CMASK_HIGH_REGNUM (tdep->pauth_reg_base);
4298 /* We have both a code mask and a data mask. For now they are
4299 the same, but this may change in the future. */
4300 if (regs->cooked_read (dmask_regnum, &dmask) != REG_VALID)
4301 dmask = mask;
4303 if (regs->cooked_read (cmask_regnum, &cmask) != REG_VALID)
4304 cmask = mask;
4306 mask |= aarch64_mask_from_pac_registers (cmask, dmask);
4311 return aarch64_remove_top_bits (pointer, mask);
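/* Example with the default mask: a user-space pointer carrying an MTE tag
   in its top byte, say 0x3400ffffdeadbeef, comes back as
   0x0000ffffdeadbeef once the top bits are discarded; when pauth mask
   registers are readable, the mask above widens to cover the signature
   bits as well.  */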
4314 /* Given NAMES, a vector of strings, initialize it with all the SME
4315 pseudo-register names for the current streaming vector length. */
4317 static void
4318 aarch64_initialize_sme_pseudo_names (struct gdbarch *gdbarch,
4319 std::vector<std::string> &names)
4321 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
4323 gdb_assert (tdep->has_sme ());
4324 gdb_assert (tdep->sme_tile_slice_pseudo_base > 0);
4325 gdb_assert (tdep->sme_tile_pseudo_base > 0);
4327 for (int i = 0; i < tdep->sme_tile_slice_pseudo_count; i++)
4329 int regnum = tdep->sme_tile_slice_pseudo_base + i;
4330 struct za_pseudo_encoding encoding;
4331 aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
4332 names.push_back (aarch64_za_tile_slice_name (encoding));
4334 for (int i = 0; i < AARCH64_ZA_TILES_NUM; i++)
4336 int regnum = tdep->sme_tile_pseudo_base + i;
4337 struct za_pseudo_encoding encoding;
4338 aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
4339 names.push_back (aarch64_za_tile_name (encoding));
4343 /* Initialize the current architecture based on INFO. If possible,
4344 re-use an architecture from ARCHES, which is a list of
4345 architectures already created during this debugging session.
4347 Called e.g. at program startup, when reading a core file, and when
4348 reading a binary file. */
4350 static struct gdbarch *
4351 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
4353 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
4354 const struct tdesc_feature *feature_pauth;
4355 bool valid_p = true;
4356 int i, num_regs = 0, num_pseudo_regs = 0;
4357 int first_pauth_regnum = -1, ra_sign_state_offset = -1;
4358 int first_mte_regnum = -1, first_tls_regnum = -1;
4359 uint64_t vq = aarch64_get_tdesc_vq (info.target_desc);
4360 uint64_t svq = aarch64_get_tdesc_svq (info.target_desc);
4362 if (vq > AARCH64_MAX_SVE_VQ)
4363 internal_error (_("VQ out of bounds: %s (max %d)"),
4364 pulongest (vq), AARCH64_MAX_SVE_VQ);
4366 if (svq > AARCH64_MAX_SVE_VQ)
4367 internal_error (_("Streaming vector quotient (svq) out of bounds: %s"
4368 " (max %d)"),
4369 pulongest (svq), AARCH64_MAX_SVE_VQ);
4371 /* If there is already a candidate, use it. */
4372 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
4373 best_arch != nullptr;
4374 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
4376 aarch64_gdbarch_tdep *tdep
4377 = gdbarch_tdep<aarch64_gdbarch_tdep> (best_arch->gdbarch);
4378 if (tdep && tdep->vq == vq && tdep->sme_svq == svq)
4379 return best_arch->gdbarch;
4382 /* Ensure we always have a target descriptor, and that it is for the given VQ
4383 value. */
4384 const struct target_desc *tdesc = info.target_desc;
4385 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc)
4386 || svq != aarch64_get_tdesc_svq (tdesc))
4388 aarch64_features features;
4389 features.vq = vq;
4390 features.svq = svq;
4391 tdesc = aarch64_read_description (features);
4393 gdb_assert (tdesc);
4395 feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core");
4396 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
4397 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
4398 const struct tdesc_feature *feature_mte
4399 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
4400 const struct tdesc_feature *feature_tls
4401 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
4403 if (feature_core == nullptr)
4404 return nullptr;
4406 tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
4408 /* Validate the description provides the mandatory core R registers
4409 and allocate their numbers. */
4410 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
4411 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
4412 AARCH64_X0_REGNUM + i,
4413 aarch64_r_register_names[i]);
4415 num_regs = AARCH64_X0_REGNUM + i;
4417 /* Add the V registers. */
4418 if (feature_fpu != nullptr)
4420 if (feature_sve != nullptr)
4421 error (_("Program contains both fpu and SVE features."));
4423 /* Validate the description provides the mandatory V registers
4424 and allocate their numbers. */
4425 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
4426 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
4427 AARCH64_V0_REGNUM + i,
4428 aarch64_v_register_names[i]);
4430 num_regs = AARCH64_V0_REGNUM + i;
4433 /* Add the SVE registers. */
4434 if (feature_sve != nullptr)
4436 /* Validate the description provides the mandatory SVE registers
4437 and allocate their numbers. */
4438 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
4439 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
4440 AARCH64_SVE_Z0_REGNUM + i,
4441 aarch64_sve_register_names[i]);
4443 num_regs = AARCH64_SVE_Z0_REGNUM + i;
4444 num_pseudo_regs += 32; /* add the Vn register pseudos. */
4447 if (feature_fpu != nullptr || feature_sve != nullptr)
4449 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
4450 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
4451 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
4452 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
4453 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
4456 int first_sme_regnum = -1;
4457 int first_sme2_regnum = -1;
4458 int first_sme_pseudo_regnum = -1;
4459 const struct tdesc_feature *feature_sme
4460 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme");
4461 if (feature_sme != nullptr)
4463 /* Record the first SME register. */
4464 first_sme_regnum = num_regs;
4466 valid_p &= tdesc_numbered_register (feature_sme, tdesc_data.get (),
4467 num_regs++, "svg");
4469 valid_p &= tdesc_numbered_register (feature_sme, tdesc_data.get (),
4470 num_regs++, "svcr");
4472 valid_p &= tdesc_numbered_register (feature_sme, tdesc_data.get (),
4473 num_regs++, "za");
4475 /* Record the first SME pseudo register. */
4476 first_sme_pseudo_regnum = num_pseudo_regs;
4478       /* Add the ZA tile slice pseudo registers.  The number of tile slice
4479	  pseudo-registers depends on the svl and is always a multiple of 5.  */
4480 num_pseudo_regs += (svq << 5) * 5;
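      /* For example, svq == 2 means svl == 32 bytes: each of the 5
	 qualifiers contributes 2 * svl tile slices (one set per
	 direction), giving (2 << 5) * 5 == 320 tile slice
	 pseudo-registers.  */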
4482 /* Add the ZA tile pseudo registers. */
4483 num_pseudo_regs += AARCH64_ZA_TILES_NUM;
4485 /* Now check for the SME2 feature. SME2 is only available if SME is
4486 available. */
4487 const struct tdesc_feature *feature_sme2
4488 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme2");
4489 if (feature_sme2 != nullptr)
4491 /* Record the first SME2 register. */
4492 first_sme2_regnum = num_regs;
4494 valid_p &= tdesc_numbered_register (feature_sme2, tdesc_data.get (),
4495 num_regs++, "zt0");
4499 /* Add the TLS register. */
4500 int tls_register_count = 0;
4501 if (feature_tls != nullptr)
4503 first_tls_regnum = num_regs;
4505 /* Look for the TLS registers. tpidr is required, but tpidr2 is
4506 optional. */
4507 valid_p
4508 = tdesc_numbered_register (feature_tls, tdesc_data.get (),
4509 first_tls_regnum, "tpidr");
4511 if (valid_p)
4513 tls_register_count++;
4515 bool has_tpidr2
4516 = tdesc_numbered_register (feature_tls, tdesc_data.get (),
4517 first_tls_regnum + tls_register_count,
4518 "tpidr2");
4520 /* Figure out how many TLS registers we have. */
4521 if (has_tpidr2)
4522 tls_register_count++;
4524 num_regs += tls_register_count;
4526 else
4528 warning (_("Provided TLS register feature doesn't contain "
4529 "required tpidr register."));
4530 return nullptr;
4534 /* We have two versions of the pauth target description due to a past bug
4535 where GDB would crash when seeing the first version of the pauth target
4536 description. */
4537 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
4538 if (feature_pauth == nullptr)
4539 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth_v2");
4541 /* Add the pauth registers. */
4542 int pauth_masks = 0;
4543 if (feature_pauth != NULL)
4545 first_pauth_regnum = num_regs;
4546 ra_sign_state_offset = num_pseudo_regs;
4548 /* Size of the expected register set with all 4 masks. */
4549 int set_size = ARRAY_SIZE (aarch64_pauth_register_names);
4551       /* QEMU exposes a couple of additional masks for the high half of the
4552 address. We should either have 2 registers or 4 registers. */
4553 if (tdesc_unnumbered_register (feature_pauth,
4554 "pauth_dmask_high") == 0)
4556 /* We did not find pauth_dmask_high, assume we only have
4557 2 masks. We are not dealing with QEMU/Emulators then. */
4558 set_size -= 2;
4561 /* Validate the descriptor provides the mandatory PAUTH registers and
4562 allocate their numbers. */
4563 for (i = 0; i < set_size; i++)
4564 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
4565 first_pauth_regnum + i,
4566 aarch64_pauth_register_names[i]);
4568 num_regs += i;
4569 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
4570 pauth_masks = set_size;
4573 /* Add the MTE registers. */
4574 if (feature_mte != NULL)
4576 first_mte_regnum = num_regs;
4577 /* Validate the descriptor provides the mandatory MTE registers and
4578 allocate their numbers. */
4579 for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
4580 valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
4581 first_mte_regnum + i,
4582 aarch64_mte_register_names[i]);
4584 num_regs += i;
4586   /* W pseudo-registers.  */
4587 int first_w_regnum = num_pseudo_regs;
4588 num_pseudo_regs += 31;
4590 if (!valid_p)
4591 return nullptr;
4593 /* AArch64 code is always little-endian. */
4594 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
4596 gdbarch *gdbarch
4597 = gdbarch_alloc (&info, gdbarch_tdep_up (new aarch64_gdbarch_tdep));
4598 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
4600 /* This should be low enough for everything. */
4601 tdep->lowest_pc = 0x20;
4602 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
4603 tdep->jb_elt_size = 8;
4604 tdep->vq = vq;
4605 tdep->pauth_reg_base = first_pauth_regnum;
4606 tdep->pauth_reg_count = pauth_masks;
4607 tdep->ra_sign_state_regnum = -1;
4608 tdep->mte_reg_base = first_mte_regnum;
4609 tdep->tls_regnum_base = first_tls_regnum;
4610 tdep->tls_register_count = tls_register_count;
4612 /* Set the SME register set details. The pseudo-registers will be adjusted
4613 later. */
4614 tdep->sme_reg_base = first_sme_regnum;
4615 tdep->sme_svg_regnum = first_sme_regnum;
4616 tdep->sme_svcr_regnum = first_sme_regnum + 1;
4617 tdep->sme_za_regnum = first_sme_regnum + 2;
4618 tdep->sme_svq = svq;
4620 /* Set the SME2 register set details. */
4621 tdep->sme2_zt0_regnum = first_sme2_regnum;
4623 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
4624 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
4626 /* Advance PC across function entry code. */
4627 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
4629 /* The stack grows downward. */
4630 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
4632 /* Breakpoint manipulation. */
4633 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
4634 aarch64_breakpoint::kind_from_pc);
4635 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
4636 aarch64_breakpoint::bp_from_kind);
4637 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
4638 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
4640 /* Information about registers, etc. */
4641 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
4642 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
4643 set_gdbarch_num_regs (gdbarch, num_regs);
4645 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
4646 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
4647 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
4648 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
4649 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
4650 set_tdesc_pseudo_register_reggroup_p (gdbarch,
4651 aarch64_pseudo_register_reggroup_p);
4652 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
4654 /* Set the allocation tag granule size to 16 bytes. */
4655 set_gdbarch_memtag_granule_size (gdbarch, AARCH64_MTE_GRANULE_SIZE);
4657 /* Register a hook for checking if there is a memory tag match. */
4658 set_gdbarch_memtag_matches_p (gdbarch, aarch64_memtag_matches_p);
4660 /* Register a hook for setting the logical/allocation tags for
4661 a range of addresses. */
4662 set_gdbarch_set_memtags (gdbarch, aarch64_set_memtags);
4664 /* Register a hook for extracting the logical/allocation tag from an
4665 address. */
4666 set_gdbarch_get_memtag (gdbarch, aarch64_get_memtag);
4668 /* Register a hook for converting a memory tag to a string. */
4669 set_gdbarch_memtag_to_string (gdbarch, aarch64_memtag_to_string);
4671 /* ABI */
4672 set_gdbarch_short_bit (gdbarch, 16);
4673 set_gdbarch_int_bit (gdbarch, 32);
4674 set_gdbarch_float_bit (gdbarch, 32);
4675 set_gdbarch_double_bit (gdbarch, 64);
4676 set_gdbarch_long_double_bit (gdbarch, 128);
4677 set_gdbarch_long_bit (gdbarch, 64);
4678 set_gdbarch_long_long_bit (gdbarch, 64);
4679 set_gdbarch_ptr_bit (gdbarch, 64);
4680 set_gdbarch_char_signed (gdbarch, 0);
4681 set_gdbarch_wchar_signed (gdbarch, 0);
4682 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
4683 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
4684 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_quad);
4685 set_gdbarch_type_align (gdbarch, aarch64_type_align);
4687 /* Detect whether PC is at a point where the stack has been destroyed. */
4688 set_gdbarch_stack_frame_destroyed_p (gdbarch, aarch64_stack_frame_destroyed_p);
4690 /* Internal <-> external register number maps. */
4691 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
4693 /* Returning results. */
4694 set_gdbarch_return_value_as_value (gdbarch, aarch64_return_value);
4696 /* Disassembly. */
4697 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
4699 /* Virtual tables. */
4700 set_gdbarch_vbit_in_delta (gdbarch, 1);
4702 /* Hook in the ABI-specific overrides, if they have been registered. */
4703 info.target_desc = tdesc;
4704 info.tdesc_data = tdesc_data.get ();
4705 gdbarch_init_osabi (info, gdbarch);
4707 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
4708 /* Register DWARF CFA vendor handler. */
4709 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
4710 aarch64_execute_dwarf_cfa_vendor_op);
4712 /* Permanent/Program breakpoint handling. */
4713 set_gdbarch_program_breakpoint_here_p (gdbarch,
4714 aarch64_program_breakpoint_here_p);
4716 /* Add some default predicates. */
4717 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
4718 dwarf2_append_unwinders (gdbarch);
4719 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
4721 frame_base_set_default (gdbarch, &aarch64_normal_base);
4723 /* Now we have tuned the configuration, set a few final things,
4724 based on what the OS ABI has told us. */
4726 if (tdep->jb_pc >= 0)
4727 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
4729 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
4731 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
4733 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
4735 /* Fetch the updated number of registers after we're done adding all
4736 entries from features we don't explicitly care about. This is the case
4737 for bare metal debugging stubs that include a lot of system registers. */
4738 num_regs = gdbarch_num_regs (gdbarch);
4740 /* With the number of real registers updated, setup the pseudo-registers and
4741 record their numbers. */
4743 /* Setup W pseudo-register numbers. */
4744 tdep->w_pseudo_base = first_w_regnum + num_regs;
4745 tdep->w_pseudo_count = 31;
4747 /* Pointer authentication pseudo-registers. */
4748 if (tdep->has_pauth ())
4749 tdep->ra_sign_state_regnum = ra_sign_state_offset + num_regs;
4751 /* Architecture hook to remove bits of a pointer that are not part of the
4752 address, like memory tags (MTE) and pointer authentication signatures.
4753 Configure address adjustment for watchpoints, breakpoints and memory
4754 transfer. */
4755 set_gdbarch_remove_non_address_bits_watchpoint
4756 (gdbarch, aarch64_remove_non_address_bits);
4757 set_gdbarch_remove_non_address_bits_breakpoint
4758 (gdbarch, aarch64_remove_non_address_bits);
4759 set_gdbarch_remove_non_address_bits_memory
4760 (gdbarch, aarch64_remove_non_address_bits);
4762 /* SME pseudo-registers. */
4763 if (tdep->has_sme ())
4765 tdep->sme_pseudo_base = num_regs + first_sme_pseudo_regnum;
4766 tdep->sme_tile_slice_pseudo_base = tdep->sme_pseudo_base;
4767 tdep->sme_tile_slice_pseudo_count = (svq * 32) * 5;
4768 tdep->sme_tile_pseudo_base
4769 = tdep->sme_pseudo_base + tdep->sme_tile_slice_pseudo_count;
4770 tdep->sme_pseudo_count
4771 = tdep->sme_tile_slice_pseudo_count + AARCH64_ZA_TILES_NUM;
4773 /* The SME ZA pseudo-registers are a set of 160 to 2560 pseudo-registers
4774 depending on the value of svl.
4776 The tile pseudo-registers are organized around their qualifiers
4777 (b, h, s, d and q). Their numbers are distributed as follows:
4779	 b 0
4780	 h 1~2
4781 s 3~6
4782 d 7~14
4783 q 15~30
4785 The naming of the tile pseudo-registers follows the pattern za<t><q>,
4786 where:
4788 <t> is the tile number, with the following possible values based on
4789 the qualifiers:
4791 Qualifier - Allocated indexes
4793 b - 0
4794 h - 0~1
4795 s - 0~3
4796 d - 0~7
4797 q - 0~15
4799 <q> is the qualifier: b, h, s, d and q.
4801 The tile slice pseudo-registers are organized around their
4802 qualifiers as well (b, h, s, d and q), but also around their
4803 direction (h - horizontal and v - vertical).
4805 Even-numbered tile slice pseudo-registers are horizontally-oriented
4806 and odd-numbered tile slice pseudo-registers are vertically-oriented.
4808 Their numbers are distributed as follows:
4810 Qualifier - Allocated indexes
4812 b tile slices - 0~511
4813 h tile slices - 512~1023
4814 s tile slices - 1024~1535
4815 d tile slices - 1536~2047
4816 q tile slices - 2048~2559
4818 The naming of the tile slice pseudo-registers follows the pattern
4819 za<t><d><q><s>, where:
4821 <t> is the tile number as described for the tile pseudo-registers.
4822 <d> is the direction of the tile slice (h or v)
4823 <q> is the qualifier of the tile slice (b, h, s, d or q)
4824 <s> is the slice number, defined as follows:
4826 Qualifier - Allocated indexes
4828 b - 0~15
4829 h - 0~7
4830 s - 0~3
4831 d - 0~1
4832 q - 0
4834 We have helper functions to translate to/from register index from/to
4835 the set of fields that make the pseudo-register names. */
4837 /* Build the array of pseudo-register names available for this
4838 particular gdbarch configuration. */
4839 aarch64_initialize_sme_pseudo_names (gdbarch, tdep->sme_pseudo_names);
4842 /* Add standard register aliases. */
4843 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
4844 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
4845 value_of_aarch64_user_reg,
4846 &aarch64_register_aliases[i].regnum);
4848 register_aarch64_ravenscar_ops (gdbarch);
4850 return gdbarch;
4853 static void
4854 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
4856 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
4858 if (tdep == NULL)
4859 return;
4861 gdb_printf (file, _("aarch64_dump_tdep: Lowest pc = 0x%s\n"),
4862 paddress (gdbarch, tdep->lowest_pc));
4864 /* SME fields. */
4865 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_q = %s\n"),
4866 host_address_to_string (tdep->sme_tile_type_q));
4867 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_d = %s\n"),
4868 host_address_to_string (tdep->sme_tile_type_d));
4869 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_s = %s\n"),
4870 host_address_to_string (tdep->sme_tile_type_s));
4871 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_h = %s\n"),
4872 host_address_to_string (tdep->sme_tile_type_h));
4873   gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_b = %s\n"),
4874 host_address_to_string (tdep->sme_tile_type_b));
4875 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_q = %s\n"),
4876 host_address_to_string (tdep->sme_tile_slice_type_q));
4877 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_d = %s\n"),
4878 host_address_to_string (tdep->sme_tile_slice_type_d));
4879 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_s = %s\n"),
4880 host_address_to_string (tdep->sme_tile_slice_type_s));
4881 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_h = %s\n"),
4882 host_address_to_string (tdep->sme_tile_slice_type_h));
4883 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_b = %s\n"),
4884 host_address_to_string (tdep->sme_tile_slice_type_b));
4885 gdb_printf (file, _("aarch64_dump_tdep: sme_reg_base = %s\n"),
4886 pulongest (tdep->sme_reg_base));
4887 gdb_printf (file, _("aarch64_dump_tdep: sme_svg_regnum = %s\n"),
4888 pulongest (tdep->sme_svg_regnum));
4889 gdb_printf (file, _("aarch64_dump_tdep: sme_svcr_regnum = %s\n"),
4890 pulongest (tdep->sme_svcr_regnum));
4891 gdb_printf (file, _("aarch64_dump_tdep: sme_za_regnum = %s\n"),
4892 pulongest (tdep->sme_za_regnum));
4893 gdb_printf (file, _("aarch64_dump_tdep: sme_pseudo_base = %s\n"),
4894 pulongest (tdep->sme_pseudo_base));
4895 gdb_printf (file, _("aarch64_dump_tdep: sme_pseudo_count = %s\n"),
4896 pulongest (tdep->sme_pseudo_count));
4897 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_pseudo_base = %s\n"),
4898 pulongest (tdep->sme_tile_slice_pseudo_base));
4899 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_pseudo_count = %s\n"),
4900 pulongest (tdep->sme_tile_slice_pseudo_count));
4901 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_pseudo_base = %s\n"),
4902 pulongest (tdep->sme_tile_pseudo_base));
4903 gdb_printf (file, _("aarch64_dump_tdep: sme_svq = %s\n"),
4904 pulongest (tdep->sme_svq));
4907 #if GDB_SELF_TEST
4908 namespace selftests
4910 static void aarch64_process_record_test (void);
4912 #endif
4914 void _initialize_aarch64_tdep ();
4915 void
4916 _initialize_aarch64_tdep ()
4918 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
4919 aarch64_dump_tdep);
4921 /* Debug this file's internals. */
4922 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
4923 Set AArch64 debugging."), _("\
4924 Show AArch64 debugging."), _("\
4925 When on, AArch64 specific debugging is enabled."),
4926 NULL,
4927 show_aarch64_debug,
4928 &setdebuglist, &showdebuglist);
4930 #if GDB_SELF_TEST
4931 selftests::register_test ("aarch64-analyze-prologue",
4932 selftests::aarch64_analyze_prologue_test);
4933 selftests::register_test ("aarch64-process-record",
4934 selftests::aarch64_process_record_test);
4935 #endif
4938 /* AArch64 process record-replay related structures, defines etc. */
4940 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
4941 do \
4943 unsigned int reg_len = LENGTH; \
4944 if (reg_len) \
4946 REGS = XNEWVEC (uint32_t, reg_len); \
4947 	  memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
4950 while (0)
4952 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
4953 do \
4955 unsigned int mem_len = LENGTH; \
4956 if (mem_len) \
4958 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
4959 	  memcpy (MEMS, &RECORD_BUF[0], \
4960 		  sizeof (struct aarch64_mem_r) * LENGTH); \
4963 while (0)
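/* Typical use, as in the record handlers below: a handler collects the
   numbers of the registers an instruction writes into a local record_buf
   array, sets reg_rec_count, and then calls
   REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
   record_buf) to copy them into the heap-allocated list consumed by the
   record target; MEM_ALLOC does the same for length/address pairs.  */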
4965 /* AArch64 record/replay structures and enumerations. */
4967 struct aarch64_mem_r
4969 uint64_t len; /* Record length. */
4970 uint64_t addr; /* Memory address. */
4973 enum aarch64_record_result
4975 AARCH64_RECORD_SUCCESS,
4976 AARCH64_RECORD_UNSUPPORTED,
4977 AARCH64_RECORD_UNKNOWN
4980 struct aarch64_insn_decode_record
4982 struct gdbarch *gdbarch;
4983 struct regcache *regcache;
4984 CORE_ADDR this_addr; /* Address of insn to be recorded. */
4985 uint32_t aarch64_insn; /* Insn to be recorded. */
4986 uint32_t mem_rec_count; /* Count of memory records. */
4987 uint32_t reg_rec_count; /* Count of register records. */
4988 uint32_t *aarch64_regs; /* Registers to be recorded. */
4989 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
4992 /* Record handler for data processing - register instructions. */
4994 static unsigned int
4995 aarch64_record_data_proc_reg (aarch64_insn_decode_record *aarch64_insn_r)
4997 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
4998 uint32_t record_buf[4];
5000 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
5001 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
5002 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
5004 if (!bit (aarch64_insn_r->aarch64_insn, 28))
5006 uint8_t setflags;
5008 /* Logical (shifted register). */
5009 if (insn_bits24_27 == 0x0a)
5010 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
5011 /* Add/subtract. */
5012 else if (insn_bits24_27 == 0x0b)
5013 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
5014 else
5015 return AARCH64_RECORD_UNKNOWN;
5017 record_buf[0] = reg_rd;
5018 aarch64_insn_r->reg_rec_count = 1;
5019 if (setflags)
5020 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
5022 else
5024 if (insn_bits24_27 == 0x0b)
5026 /* Data-processing (3 source). */
5027 record_buf[0] = reg_rd;
5028 aarch64_insn_r->reg_rec_count = 1;
5030 else if (insn_bits24_27 == 0x0a)
5032 if (insn_bits21_23 == 0x00)
5034 /* Add/subtract (with carry). */
5035 record_buf[0] = reg_rd;
5036 aarch64_insn_r->reg_rec_count = 1;
5037 if (bit (aarch64_insn_r->aarch64_insn, 29))
5039 record_buf[1] = AARCH64_CPSR_REGNUM;
5040 aarch64_insn_r->reg_rec_count = 2;
5043 else if (insn_bits21_23 == 0x02)
5045 /* Conditional compare (register) and conditional compare
5046 (immediate) instructions. */
5047 record_buf[0] = AARCH64_CPSR_REGNUM;
5048 aarch64_insn_r->reg_rec_count = 1;
5050 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
5052 /* Conditional select. */
5053 /* Data-processing (2 source). */
5054 /* Data-processing (1 source). */
5055 record_buf[0] = reg_rd;
5056 aarch64_insn_r->reg_rec_count = 1;
5058 else
5059 return AARCH64_RECORD_UNKNOWN;
5063 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
5064 record_buf);
5065 return AARCH64_RECORD_SUCCESS;
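/* Worked example for the handler above: "ADDS x0, x1, x2" encodes as
   0xab020020, so insn_bits24_27 == 0x0b with bit 28 clear; bit 29 (the
   S bit) is set, so both the destination x0 and AARCH64_CPSR_REGNUM are
   recorded.  */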
5068 /* Record handler for data processing - immediate instructions. */
5070 static unsigned int
5071 aarch64_record_data_proc_imm (aarch64_insn_decode_record *aarch64_insn_r)
5073 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
5074 uint32_t record_buf[4];
5076 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
5077 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
5078 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
5080 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
5081 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
5082 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
5084 record_buf[0] = reg_rd;
5085 aarch64_insn_r->reg_rec_count = 1;
5087 else if (insn_bits24_27 == 0x01)
5089 /* Add/Subtract (immediate). */
5090 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
5091 record_buf[0] = reg_rd;
5092 aarch64_insn_r->reg_rec_count = 1;
5093 if (setflags)
5094 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
5096 else if (insn_bits24_27 == 0x02 && !insn_bit23)
5098 /* Logical (immediate). */
5099 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
5100 record_buf[0] = reg_rd;
5101 aarch64_insn_r->reg_rec_count = 1;
5102 if (setflags)
5103 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
5105 else
5106 return AARCH64_RECORD_UNKNOWN;
5108 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
5109 record_buf);
5110 return AARCH64_RECORD_SUCCESS;
5113 /* Record handler for branch, exception generation and system instructions. */
5115 static unsigned int
5116 aarch64_record_branch_except_sys (aarch64_insn_decode_record *aarch64_insn_r)
5119 aarch64_gdbarch_tdep *tdep
5120 = gdbarch_tdep<aarch64_gdbarch_tdep> (aarch64_insn_r->gdbarch);
5121 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
5122 uint32_t record_buf[4];
5124 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
5125 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
5126 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
5128 if (insn_bits28_31 == 0x0d)
5130 /* Exception generation instructions. */
5131 if (insn_bits24_27 == 0x04)
5133 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
5134 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
5135 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
5137 ULONGEST svc_number;
5139 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
5140 &svc_number);
5141 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
5142 svc_number);
5144 else
5145 return AARCH64_RECORD_UNSUPPORTED;
5147 /* System instructions. */
5148 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
5150 uint32_t reg_rt, reg_crn;
5152 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
5153 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
5155 /* Record rt in case of sysl and mrs instructions. */
5156 if (bit (aarch64_insn_r->aarch64_insn, 21))
5158 record_buf[0] = reg_rt;
5159 aarch64_insn_r->reg_rec_count = 1;
5161 /* Record cpsr for hint and msr(immediate) instructions. */
5162 else if (reg_crn == 0x02 || reg_crn == 0x04)
5164 record_buf[0] = AARCH64_CPSR_REGNUM;
5165 aarch64_insn_r->reg_rec_count = 1;
5168 /* Unconditional branch (register). */
5169   else if ((insn_bits24_27 & 0x0e) == 0x06)
5171 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
5172 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
5173 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
5175 else
5176 return AARCH64_RECORD_UNKNOWN;
5178 /* Unconditional branch (immediate). */
5179 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
5181 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
5182 if (bit (aarch64_insn_r->aarch64_insn, 31))
5183 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
5185 else
5186 /* Compare & branch (immediate), Test & branch (immediate) and
5187 Conditional branch (immediate). */
5188 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
5190 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
5191 record_buf);
5192 return AARCH64_RECORD_SUCCESS;
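/* For instance, "BL <label>" (top byte 0x94..0x97, bit 31 set) records
   both AARCH64_PC_REGNUM and AARCH64_LR_REGNUM above, while a plain
   "B <label>" records only the PC.  */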
5195 /* Record handler for advanced SIMD load and store instructions. */
5197 static unsigned int
5198 aarch64_record_asimd_load_store (aarch64_insn_decode_record *aarch64_insn_r)
5200 CORE_ADDR address;
5201 uint64_t addr_offset = 0;
5202 uint32_t record_buf[24];
5203 uint64_t record_buf_mem[24];
5204 uint32_t reg_rn, reg_rt;
5205 uint32_t reg_index = 0, mem_index = 0;
5206 uint8_t opcode_bits, size_bits;
5208 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
5209 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
5210 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
5211 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
5212 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
5214 if (record_debug)
5215 debug_printf ("Process record: Advanced SIMD load/store\n");
5217 /* Load/store single structure. */
5218 if (bit (aarch64_insn_r->aarch64_insn, 24))
5220 uint8_t sindex, scale, selem, esize, replicate = 0;
5221 scale = opcode_bits >> 2;
5222 selem = ((opcode_bits & 0x02) |
5223 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
5224 switch (scale)
5226 case 1:
5227 if (size_bits & 0x01)
5228 return AARCH64_RECORD_UNKNOWN;
5229 break;
5230 case 2:
5231 if ((size_bits >> 1) & 0x01)
5232 return AARCH64_RECORD_UNKNOWN;
5233 if (size_bits & 0x01)
5235 if (!((opcode_bits >> 1) & 0x01))
5236 scale = 3;
5237 else
5238 return AARCH64_RECORD_UNKNOWN;
5240 break;
5241 case 3:
5242 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
5244 scale = size_bits;
5245 replicate = 1;
5246 break;
5248 else
5249 return AARCH64_RECORD_UNKNOWN;
5250 default:
5251 break;
5253 esize = 8 << scale;
5254 if (replicate)
5255 for (sindex = 0; sindex < selem; sindex++)
5257 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
5258 reg_rt = (reg_rt + 1) % 32;
5260 else
5262 for (sindex = 0; sindex < selem; sindex++)
5264 if (bit (aarch64_insn_r->aarch64_insn, 22))
5265 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
5266 else
5268 record_buf_mem[mem_index++] = esize / 8;
5269 record_buf_mem[mem_index++] = address + addr_offset;
5271 addr_offset = addr_offset + (esize / 8);
5272 reg_rt = (reg_rt + 1) % 32;
5276 /* Load/store multiple structure. */
5277 else
5279 uint8_t selem, esize, rpt, elements;
5280 uint8_t eindex, rindex;
5282 esize = 8 << size_bits;
5283 if (bit (aarch64_insn_r->aarch64_insn, 30))
5284 elements = 128 / esize;
5285 else
5286 elements = 64 / esize;
5288 switch (opcode_bits)
5290 	   /* LD/ST4 (4 Registers).  */
5291 case 0:
5292 rpt = 1;
5293 selem = 4;
5294 break;
5295 	   /* LD/ST1 (4 Registers).  */
5296 case 2:
5297 rpt = 4;
5298 selem = 1;
5299 break;
5300 	   /* LD/ST3 (3 Registers).  */
5301 case 4:
5302 rpt = 1;
5303 selem = 3;
5304 break;
5305 	   /* LD/ST1 (3 Registers).  */
5306 case 6:
5307 rpt = 3;
5308 selem = 1;
5309 break;
5310 	   /* LD/ST1 (1 Register).  */
5311 case 7:
5312 rpt = 1;
5313 selem = 1;
5314 break;
5315 	   /* LD/ST2 (2 Registers).  */
5316 case 8:
5317 rpt = 1;
5318 selem = 2;
5319 break;
5320 	   /* LD/ST1 (2 Registers).  */
5321 case 10:
5322 rpt = 2;
5323 selem = 1;
5324 break;
5325 default:
5326 return AARCH64_RECORD_UNSUPPORTED;
5327 break;
5329 for (rindex = 0; rindex < rpt; rindex++)
5330 for (eindex = 0; eindex < elements; eindex++)
5332 uint8_t reg_tt, sindex;
5333 reg_tt = (reg_rt + rindex) % 32;
5334 for (sindex = 0; sindex < selem; sindex++)
5336 if (bit (aarch64_insn_r->aarch64_insn, 22))
5337 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
5338 else
5340 record_buf_mem[mem_index++] = esize / 8;
5341 record_buf_mem[mem_index++] = address + addr_offset;
5343 addr_offset = addr_offset + (esize / 8);
5344 reg_tt = (reg_tt + 1) % 32;
5349 if (bit (aarch64_insn_r->aarch64_insn, 23))
5350 record_buf[reg_index++] = reg_rn;
5352 aarch64_insn_r->reg_rec_count = reg_index;
5353 aarch64_insn_r->mem_rec_count = mem_index / 2;
5354 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
5355 record_buf_mem);
5356 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
5357 record_buf);
5358 return AARCH64_RECORD_SUCCESS;
5361 /* Record handler for Memory Copy and Memory Set instructions. */
5363 static unsigned int
5364 aarch64_record_memcopy_memset (aarch64_insn_decode_record *aarch64_insn_r)
5366 if (record_debug)
5367 debug_printf ("Process record: memory copy and memory set\n");
5369 uint8_t op1 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
5370 uint8_t op2 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
5371 uint32_t reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
5372 uint32_t reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
5373 uint32_t record_buf[3];
5374 uint64_t record_buf_mem[4];
5376 if (op1 == 3 && op2 > 11)
5377 /* Unallocated instructions. */
5378 return AARCH64_RECORD_UNKNOWN;
5380 /* Set instructions have two registers and one memory region to be
5381 recorded. */
5382 record_buf[0] = reg_rd;
5383 record_buf[1] = reg_rn;
5384 aarch64_insn_r->reg_rec_count = 2;
5386 ULONGEST dest_addr;
5387 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rd, &dest_addr);
5389 LONGEST length;
5390 regcache_raw_read_signed (aarch64_insn_r->regcache, reg_rn, &length);
5392   /* Depending on which algorithm option the processor implements, the
5393      length in Rn may have an inverted sign.  */
5394 if (length < 0)
5395 length *= -1;
5397 record_buf_mem[0] = length;
5398 record_buf_mem[1] = dest_addr;
5399 aarch64_insn_r->mem_rec_count = 1;
5401 if (op1 != 3)
5403 /* Copy instructions have an additional register and an additional
5404 memory region to be recorded. */
5405 uint32_t reg_rs = bits (aarch64_insn_r->aarch64_insn, 16, 20);
5407 record_buf[2] = reg_rs;
5408 aarch64_insn_r->reg_rec_count++;
5410 ULONGEST source_addr;
5411 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rs,
5412 &source_addr);
5414 record_buf_mem[2] = length;
5415 record_buf_mem[3] = source_addr;
5416 aarch64_insn_r->mem_rec_count++;
5419 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
5420 record_buf_mem);
5421 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
5422 record_buf);
5423 return AARCH64_RECORD_SUCCESS;
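/* For example, a CPYF* prologue/main/epilogue step (decoded here as
   op1 != 3) records Rd, Rn and Rs plus both the destination and source
   byte ranges, while a SET* step (op1 == 3) records only Rd, Rn and the
   destination range, the range length coming from the (possibly negated)
   count in Rn.  */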
5426 /* Record handler for load and store instructions. */
5428 static unsigned int
5429 aarch64_record_load_store (aarch64_insn_decode_record *aarch64_insn_r)
5431 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
5432 uint8_t insn_bit23, insn_bit21;
5433 uint8_t opc, size_bits, ld_flag, vector_flag;
5434 uint32_t reg_rn, reg_rt, reg_rt2;
5435 uint64_t datasize, offset;
5436 uint32_t record_buf[8];
5437 uint64_t record_buf_mem[8];
5438 CORE_ADDR address;
5440 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
5441 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
5442 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
5443 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
5444 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
5445 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
5446 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
5447 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
5448 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
5449 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
5450 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
5452 /* Load/store exclusive. */
5453 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
5455 if (record_debug)
5456 debug_printf ("Process record: load/store exclusive\n");
5458 if (ld_flag)
5460 record_buf[0] = reg_rt;
5461 aarch64_insn_r->reg_rec_count = 1;
5462 if (insn_bit21)
5464 record_buf[1] = reg_rt2;
5465 aarch64_insn_r->reg_rec_count = 2;
5468 else
5470 if (insn_bit21)
5471 datasize = (8 << size_bits) * 2;
5472 else
5473 datasize = (8 << size_bits);
5474 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
5475 &address);
5476 record_buf_mem[0] = datasize / 8;
5477 record_buf_mem[1] = address;
5478 aarch64_insn_r->mem_rec_count = 1;
5479 if (!insn_bit23)
5481 /* Save register rs. */
5482 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
5483 aarch64_insn_r->reg_rec_count = 1;
5487 /* Load register (literal) instructions decoding. */
5488 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
5490 if (record_debug)
5491 debug_printf ("Process record: load register (literal)\n");
5492 if (vector_flag)
5493 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
5494 else
5495 record_buf[0] = reg_rt;
5496 aarch64_insn_r->reg_rec_count = 1;
5498 /* All types of load/store pair instructions decoding. */
5499 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
5501 if (record_debug)
5502 debug_printf ("Process record: load/store pair\n");
5504 if (ld_flag)
5506 if (vector_flag)
5508 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
5509 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
5511 else
5513 record_buf[0] = reg_rt;
5514 record_buf[1] = reg_rt2;
5516 aarch64_insn_r->reg_rec_count = 2;
5518 else
5520 uint16_t imm7_off;
5521 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
5522 if (!vector_flag)
5523 size_bits = size_bits >> 1;
5524 datasize = 8 << (2 + size_bits);
5525 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
5526 offset = offset << (2 + size_bits);
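	  /* E.g. for "STP x19, x20, [sp, #-32]!" imm7 is 0x7c: bit 6 is
	     set, so offset becomes (~0x7c & 0x7f) + 1 == 4, scaled by
	     2 + size_bits to 32 bytes and subtracted from the base
	     address below.  */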
5527 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
5528 &address);
5529 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
5531 if (imm7_off & 0x40)
5532 address = address - offset;
5533 else
5534 address = address + offset;
5537 record_buf_mem[0] = datasize / 8;
5538 record_buf_mem[1] = address;
5539 record_buf_mem[2] = datasize / 8;
5540 record_buf_mem[3] = address + (datasize / 8);
5541 aarch64_insn_r->mem_rec_count = 2;
5543 if (bit (aarch64_insn_r->aarch64_insn, 23))
5544 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
  /* Load/store register (unsigned immediate) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        {
          if (opc & 0x01)
            ld_flag = 0x01;
          else
            ld_flag = 0x0;
        }
      else
        {
          if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
            {
              /* PRFM (immediate) */
              return AARCH64_RECORD_SUCCESS;
            }
          else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
            {
              /* LDRSW (immediate) */
              ld_flag = 0x1;
            }
          else
            {
              if (opc & 0x01)
                ld_flag = 0x01;
              else
                ld_flag = 0x0;
            }
        }

      if (record_debug)
        {
          debug_printf ("Process record: load/store (unsigned immediate):"
                        " size %x V %d opc %x\n", size_bits, vector_flag,
                        opc);
        }

      if (!ld_flag)
        {
          offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          offset = offset << size_bits;
          address = address + offset;

          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
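  /* Illustrative example for the arm above: "str x0, [x1, #16]" encodes
     imm12 = 2 with size_bits = 0x3, so offset = 2 << 3 = 16 and a single
     8-byte block at x1+16 is recorded; the corresponding "ldr" form
     records only the destination register x0.  */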
  /* Load/store register (register offset) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
        debug_printf ("Process record: load/store (register offset)\n");
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        {
          if (opc & 0x01)
            ld_flag = 0x01;
          else
            ld_flag = 0x0;
        }
      else
        {
          if (size_bits != 0x03)
            ld_flag = 0x01;
          else
            return AARCH64_RECORD_UNKNOWN;
        }

      if (!ld_flag)
        {
          ULONGEST reg_rm_val;

          regcache_raw_read_unsigned (aarch64_insn_r->regcache,
                     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
          if (bit (aarch64_insn_r->aarch64_insn, 12))
            offset = reg_rm_val << size_bits;
          else
            offset = reg_rm_val;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          address = address + offset;
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
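  /* Illustrative example for the register-offset arm above: for
     "str x0, [x1, x2, lsl #3]" the shift bit (bit 12) is set, so the
     recorded address is x1 + (x2 << 3), with both registers read from
     the regcache at record time; 8 bytes are marked as modified.  */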
  /* Load/store register (immediate and unprivileged) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && !insn_bit21)
    {
      if (record_debug)
        {
          debug_printf ("Process record: load/store "
                        "(immediate and unprivileged)\n");
        }
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        {
          if (opc & 0x01)
            ld_flag = 0x01;
          else
            ld_flag = 0x0;
        }
      else
        {
          if (size_bits != 0x03)
            ld_flag = 0x01;
          else
            return AARCH64_RECORD_UNKNOWN;
        }

      if (!ld_flag)
        {
          uint16_t imm9_off;
          imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
          offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          if (insn_bits10_11 != 0x01)
            {
              if (imm9_off & 0x0100)
                address = address - offset;
              else
                address = address + offset;
            }
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
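  /* Illustrative example for the imm9 decode above: "str x0, [x1, #-8]!"
     encodes imm9 = 0x1f8, so the sign bit (0x100) is set and
     offset = ((~0x1f8) & 0x1ff) + 1 = 8.  Pre-indexing (insn_bits10_11
     == 0x03) records the 8 bytes at x1-8 and also Rn (x1) for the
     writeback; a post-indexed store (insn_bits10_11 == 0x01) instead
     records the bytes at the unmodified x1.  */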
  /* Memory Copy and Memory Set instructions.  */
  else if ((insn_bits24_27 & 1) == 1 && insn_bits28_29 == 1
           && insn_bits10_11 == 1 && !insn_bit21)
    return aarch64_record_memcopy_memset (aarch64_insn_r);
  /* Advanced SIMD load/store instructions.  */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
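/* Note: REG_ALLOC and MEM_ALLOC (macros defined earlier in this file) copy
   the local record_buf / record_buf_mem arrays into heap-allocated lists
   hanging off aarch64_insn_r; they are released again by
   deallocate_reg_mem below.  */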
/* Record handler for data processing SIMD and floating point instructions.  */

static unsigned int
aarch64_record_data_proc_simd_fp (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bit21, opcode, rmode, reg_rd;
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
  uint8_t insn_bits11_14;
  uint32_t record_buf[2];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
  opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
  rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  if (record_debug)
    debug_printf ("Process record: data processing SIMD/FP: ");

  if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
    {
      /* Floating point - fixed point conversion instructions.  */
      if (!insn_bit21)
        {
          if (record_debug)
            debug_printf ("FP - fixed point conversion");

          if ((opcode >> 1) == 0x0 && rmode == 0x03)
            record_buf[0] = reg_rd;
          else
            record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      /* Floating point - conditional compare instructions.  */
      else if (insn_bits10_11 == 0x01)
        {
          if (record_debug)
            debug_printf ("FP - conditional compare");

          record_buf[0] = AARCH64_CPSR_REGNUM;
        }
      /* Floating point - data processing (2-source) and
         conditional select instructions.  */
      else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
        {
          if (record_debug)
            debug_printf ("FP - DP (2-source)");

          record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      else if (insn_bits10_11 == 0x00)
        {
          /* Floating point - immediate instructions.  */
          if ((insn_bits12_15 & 0x01) == 0x01
              || (insn_bits12_15 & 0x07) == 0x04)
            {
              if (record_debug)
                debug_printf ("FP - immediate");
              record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
            }
          /* Floating point - compare instructions.  */
          else if ((insn_bits12_15 & 0x03) == 0x02)
            {
              if (record_debug)
                debug_printf ("FP - compare");
              record_buf[0] = AARCH64_CPSR_REGNUM;
            }
          /* Floating point - integer conversion instructions.  */
          else if (insn_bits12_15 == 0x00)
            {
              /* Convert float to integer instruction.  */
              if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
                {
                  if (record_debug)
                    debug_printf ("float to int conversion");

                  record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
                }
              /* Convert integer to float instruction.  */
              else if ((opcode >> 1) == 0x01 && !rmode)
                {
                  if (record_debug)
                    debug_printf ("int to float conversion");

                  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
                }
              /* Move float to/from integer instruction.  */
              else if ((opcode >> 1) == 0x03)
                {
                  if (record_debug)
                    debug_printf ("move float to int");

                  if (!(opcode & 0x01))
                    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
                  else
                    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
                }
              else
                return AARCH64_RECORD_UNKNOWN;
            }
          else
            return AARCH64_RECORD_UNKNOWN;
        }
      else
        return AARCH64_RECORD_UNKNOWN;
    }
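  /* Some illustrative cases for the conversion branch above (hand
     checked, not exhaustive): "fcvtzs x0, d0" (opcode 0b000) records an
     X register; "scvtf d0, x0" (opcode 0b010, rmode 0b00) records a V
     register; "fmov x0, d0" (opcode 0b110) and "fmov d0, x0" (opcode
     0b111) land in the move branch and record the X or V destination
     respectively.  */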
  else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
    {
      if (record_debug)
        debug_printf ("SIMD copy");

      /* Advanced SIMD copy instructions.  */
      if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
          && !bit (aarch64_insn_r->aarch64_insn, 15)
          && bit (aarch64_insn_r->aarch64_insn, 10))
        {
          if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
            record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
          else
            record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      else
        record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }
  /* All remaining floating point or advanced SIMD instructions.  */
  else
    {
      if (record_debug)
        debug_printf ("all remaining");

      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }

  if (record_debug)
    debug_printf ("\n");

  /* Record the V/X register.  */
  aarch64_insn_r->reg_rec_count++;

  /* Some of these instructions may set bits in the FPSR, so record it
     too.  */
  record_buf[1] = AARCH64_FPSR_REGNUM;
  aarch64_insn_r->reg_rec_count++;

  gdb_assert (aarch64_insn_r->reg_rec_count == 2);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
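/* Illustrative cases for the SIMD copy test above: "umov w0, v1.s[1]"
   (imm4, bits 11-14, == 0x07) transfers to a general register, so an X
   register is recorded; "dup v0.4s, w1" writes a vector register, so V0
   is recorded instead.  FPSR is recorded unconditionally because many of
   these instructions can set its cumulative exception bits.  */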
/* Decode the type of the current instruction and dispatch to the
   matching record handler.  */

static unsigned int
aarch64_record_decode_insn_handler (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;

  ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
  ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
  ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
  ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);

  /* Data processing - immediate instructions.  */
  if (!ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_data_proc_imm (aarch64_insn_r);

  /* Branch, exception generation and system instructions.  */
  if (ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_branch_except_sys (aarch64_insn_r);

  /* Load and store instructions.  */
  if (!ins_bit25 && ins_bit27)
    return aarch64_record_load_store (aarch64_insn_r);

  /* Data processing - register instructions.  */
  if (ins_bit25 && !ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_reg (aarch64_insn_r);

  /* Data processing - SIMD and floating point instructions.  */
  if (ins_bit25 && ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_simd_fp (aarch64_insn_r);

  return AARCH64_RECORD_UNSUPPORTED;
}
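/* For illustration: the prfm instruction 0xf9800020 used by the self-test
   below has bit 27 set and bit 25 clear, so it is routed to
   aarch64_record_load_store, which returns success without recording any
   register or memory change.  */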
/* Clean up local record registers and memory allocations.  */

static void
deallocate_reg_mem (aarch64_insn_decode_record *record)
{
  xfree (record->aarch64_regs);
  xfree (record->aarch64_mems);
}
#if GDB_SELF_TEST
namespace selftests {

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;
  uint32_t ret;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  aarch64_insn_decode_record aarch64_record;

  memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;

  /* 20 00 80 f9	prfm	pldl1keep, [x1] */
  aarch64_record.aarch64_insn = 0xf9800020;
  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 0);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);

  deallocate_reg_mem (&aarch64_record);
}

} // namespace selftests
#endif /* GDB_SELF_TEST */
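/* The self-test above is expected to be registered with the self-test
   framework elsewhere in this file (from _initialize_aarch64_tdep, via
   selftests::register_test), so that it runs under
   "maintenance selftest".  */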
/* Parse the current instruction and record the values of the registers
   and memory that will be changed by the current instruction to
   record_arch_list.  Return -1 if something is wrong.  */

int
aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
                        CORE_ADDR insn_addr)
{
  uint32_t rec_no = 0;
  const uint8_t insn_size = 4;
  int ret = 0;
  gdb_byte buf[insn_size];
  aarch64_insn_decode_record aarch64_record;

  memset (&buf[0], 0, insn_size);
  memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
  target_read_memory (insn_addr, &buf[0], insn_size);
  aarch64_record.aarch64_insn
    = (uint32_t) extract_unsigned_integer (&buf[0],
                                           insn_size,
                                           gdbarch_byte_order (gdbarch));
  aarch64_record.regcache = regcache;
  aarch64_record.this_addr = insn_addr;
  aarch64_record.gdbarch = gdbarch;

  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  if (ret == AARCH64_RECORD_UNSUPPORTED)
    {
      gdb_printf (gdb_stderr,
                  _("Process record does not support instruction "
                    "0x%0x at address %s.\n"),
                  aarch64_record.aarch64_insn,
                  paddress (gdbarch, insn_addr));
      ret = -1;
    }

  if (ret == 0)
    {
      /* Record registers.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
                                     AARCH64_PC_REGNUM);
      /* Always record register CPSR.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
                                     AARCH64_CPSR_REGNUM);
      if (aarch64_record.aarch64_regs)
        for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
          if (record_full_arch_list_add_reg (aarch64_record.regcache,
                                             aarch64_record.aarch64_regs[rec_no]))
            ret = -1;

      /* Record memory areas.  */
      if (aarch64_record.aarch64_mems)
        for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
          if (record_full_arch_list_add_mem
              ((CORE_ADDR) aarch64_record.aarch64_mems[rec_no].addr,
               aarch64_record.aarch64_mems[rec_no].len))
            ret = -1;

      if (record_full_arch_list_add_end ())
        ret = -1;
    }

  deallocate_reg_mem (&aarch64_record);
  return ret;
}
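/* Note: this function is installed as the gdbarch process_record hook
   (via set_gdbarch_process_record) when the architecture is initialized
   elsewhere in this file; record-full then calls it once per instruction
   to snapshot everything the instruction may clobber before it
   executes.  */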