Automatic date update in version.in
[binutils-gdb/blckswan.git] / gdb / aarch64-tdep.c
blob67a3f96e1a7655b82f64547871cc337d2cd97564
1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2022 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 #include "defs.h"
23 #include "frame.h"
24 #include "gdbcmd.h"
25 #include "gdbcore.h"
26 #include "dis-asm.h"
27 #include "regcache.h"
28 #include "reggroups.h"
29 #include "value.h"
30 #include "arch-utils.h"
31 #include "osabi.h"
32 #include "frame-unwind.h"
33 #include "frame-base.h"
34 #include "trad-frame.h"
35 #include "objfiles.h"
36 #include "dwarf2.h"
37 #include "dwarf2/frame.h"
38 #include "gdbtypes.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
42 #include "ax-gdb.h"
43 #include "gdbsupport/selftest.h"
45 #include "aarch64-tdep.h"
46 #include "aarch64-ravenscar-thread.h"
48 #include "record.h"
49 #include "record-full.h"
50 #include "arch/aarch64-insn.h"
51 #include "gdbarch.h"
53 #include "opcode/aarch64.h"
54 #include <algorithm>
55 #include <unordered_map>
/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members (AAPCS64 parameter-passing rules).  */
#define HA_MAX_NUM_FLDS		4

/* All possible aarch64 target descriptors, keyed by the feature set
   (SVE vector length, pauth, MTE, ...) they were built for, so each
   distinct description is only created once.  */
static std::unordered_map <aarch64_features, target_desc *> tdesc_aarch64_map;
/* The standard register names, and all the valid aliases for them.
   Each entry maps an alias to the raw register number it resolves to;
   the "w" names are the 32-bit views of the corresponding X registers.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},
  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},
  /* specials: the intra-procedure-call scratch registers alias x16/x17.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  /* Predicate registers, the first-fault register and the vector
     granule (vg) follow the Z registers.  */
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};
/* Pointer Authentication mask registers.  */
static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};
/* Memory Tagging Extension registers.  */
static const char *const aarch64_mte_register_names[] =
{
  /* Tag Control Register.  */
  "tag_ctl"
};
/* AArch64 prologue cache structure.  Filled in by the prologue analyzer
   and consumed by the prologue-based frame unwinder below.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.
     -1 means no frame could be identified.  */
  int framereg;

  /* Saved register offsets.  */
  trad_frame_saved_reg *saved_regs;
};
/* Callback for "show debug aarch64": print the current value of the
   aarch64 debugging flag.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("AArch64 debugging is %s.\n"), value);
}
namespace {

/* Abstract instruction reader.  Lets the prologue analyzer fetch
   instructions either from the live target or (in selftests) from a
   canned array.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction of LEN bytes at MEMADDR.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace
247 /* If address signing is enabled, mask off the signature bits from the link
248 register, which is passed by value in ADDR, using the register values in
249 THIS_FRAME. */
251 static CORE_ADDR
252 aarch64_frame_unmask_lr (aarch64_gdbarch_tdep *tdep,
253 struct frame_info *this_frame, CORE_ADDR addr)
255 if (tdep->has_pauth ()
256 && frame_unwind_register_unsigned (this_frame,
257 tdep->ra_sign_state_regnum))
259 int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
260 CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
261 addr = addr & ~cmask;
263 /* Record in the frame that the link register required unmasking. */
264 set_frame_previous_pc_masked (this_frame);
267 return addr;
270 /* Implement the "get_pc_address_flags" gdbarch method. */
272 static std::string
273 aarch64_get_pc_address_flags (frame_info *frame, CORE_ADDR pc)
275 if (pc != 0 && get_frame_pc_masked (frame))
276 return "PAC";
278 return "";
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.

   Instructions are fetched through READER, four bytes at a time,
   starting at START and never past LIMIT.  If CACHE is non-NULL it is
   filled in with the frame register/size and saved-register offsets
   (relative to the caller's SP).  Returns the address where analysis
   stopped, i.e. the presumed end of the prologue.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;

  /* Whether the stack has been set.  This should be true when we notice a SP
     to FP move or if we are using the SP as the base register for storing
     data, in case the FP is omitted.  */
  bool seen_stack_set = false;

  /* Track X registers and D registers in prologue, symbolically
     (prologue-value terms relative to their entry values).  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      /* Stop on anything the opcodes library cannot decode.  */
      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	break;

      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  /* ADD/SUB (immediate): track constant adjustments, e.g. the
	     "sub sp, sp, #N" stack allocation or "add x29, sp, #N".  */
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	    }
	  else
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
	    }

	  /* Did we move SP to FP?  */
	  if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == pcreladdr
	       && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
	{
	  /* ADRP: destination becomes unknown but analysis continues.  */
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

	  /* If this shows up before we set the stack, keep going.  Otherwise
	     stop the analysis.  */
	  if (seen_stack_set)
	    break;

	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  /* ORR: only the register-move form "mov rd, rm" (zero shift,
	     rn == sp/zr encoding) is understood.  */
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up "
				    "addr=%s opcode=0x%x (orr x register)",
				    core_addr_to_string_nz (start), insn);

	      break;
	    }
	}
      else if (inst.opcode->op == OP_STUR)
	{
	  /* STUR: unscaled-offset store of a single register.  */
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  stack.store
	    (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
	     size, regs[rt]);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
	    break;

	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      /* FP register pairs live after the X registers in REGS.  */
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
	  stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Ignore the instruction that allocates stack space and sets
	     the SP.  */
	  if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	{
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    rt += AARCH64_X_REGISTER_COUNT;

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == ic_system)
	{
	  /* System instructions: recognize the pointer-authentication
	     hints (paciasp/pacibsp sign LR, autiasp/autibsp strip it)
	     and BTI; anything else ends the analysis.  */
	  aarch64_gdbarch_tdep *tdep
	    = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
	  int ra_state_val = 0;

	  if (insn == 0xd503233f /* paciasp.  */
	      || insn == 0xd503237f  /* pacibsp.  */)
	    {
	      /* Return addresses are mangled.  */
	      ra_state_val = 1;
	    }
	  else if (insn == 0xd50323bf /* autiasp.  */
		   || insn == 0xd50323ff  /* autibsp.  */)
	    {
	      /* Return addresses are not mangled.  */
	      ra_state_val = 0;
	    }
	  else if (IS_BTI (insn))
	    /* We don't need to do anything special for a BTI instruction.  */
	    continue;
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up addr=%s"
				    " opcode=0x%x (iclass)",
				    core_addr_to_string_nz (start), insn);
	      break;
	    }

	  if (tdep->has_pauth () && cache != nullptr)
	    {
	      int regnum = tdep->ra_sign_state_regnum;
	      cache->saved_regs[regnum].set_value (ra_state_val);
	    }
	}
      else
	{
	  aarch64_debug_printf ("prologue analysis gave up addr=%s"
				" opcode=0x%x",
				core_addr_to_string_nz (start), insn);

	  break;
	}
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].set_addr (offset);
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      /* D registers are numbered after the raw registers, hence the
	 gdbarch_num_regs offset.  */
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
			  &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
    }

  return start;
}
/* Convenience overload of the above that reads instructions from the
   real target.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
				   reader);
}
#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.
   MEMADDR is interpreted as an index into the supplied array.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

/* Unit tests for aarch64_analyze_prologue, each scoped block feeds one
   hand-assembled instruction sequence through the analyzer and checks
   the resulting cache state.  */

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		    && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str	x19, [sp, #-48]! */
      0xb9002fe0, /* str	w0, [sp, #44] */
      0xf90013e1, /* str	x1, [sp, #32]*/
      0xfd000fe0, /* str	d0, [sp, #24] */
      0xaa0203f3, /* mov	x19, x2 */
      0xf94013e0, /* ldr	x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr () == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
	else
	  SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		      && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x910003fd, /* mov     x29, sp */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0x290207e0, /* stp     w0, w1, [sp, #16] */
      0xa9018fe2, /* stp     x2, x3, [sp, #24] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb9002be4, /* str     w4, [sp, #40] */
      0xf9001be5, /* str     x5, [sp, #48] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb80343e6, /* stur    w6, [sp, #52] */
      0xf80383e7, /* stur    x7, [sp, #56] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
	0x910003fd, /* mov     x29, sp */
	0xf801c3f3, /* str     x19, [sp, #28] */
	0xb9401fa0, /* ldr     x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr () == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].is_realreg ()
			&& cache.saved_regs[i].realreg () == i);
	}

      if (tdep->has_pauth ())
	{
	  int regnum = tdep->ra_sign_state_regnum;
	  SELF_CHECK (cache.saved_regs[regnum].is_value ());
	}
    }

  /* Test a prologue with a BTI instruction.  */
  {
    static const uint32_t insns[] = {
      0xd503245f, /* bti */
      0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
      0x910003fd, /* mov     x29, sp */
      0xf801c3f3, /* str     x19, [sp, #28] */
      0xb9401fa0, /* ldr     x19, [x29, #28] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
					      reader);

    SELF_CHECK (end == 4 * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -20);
	else if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -40);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */
914 /* Implement the "skip_prologue" gdbarch method. */
916 static CORE_ADDR
917 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
919 CORE_ADDR func_addr, limit_pc;
921 /* See if we can determine the end of the prologue via the symbol
922 table. If so, then return either PC, or the PC after the
923 prologue, whichever is greater. */
924 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
926 CORE_ADDR post_prologue_pc
927 = skip_prologue_using_sal (gdbarch, func_addr);
929 if (post_prologue_pc != 0)
930 return std::max (pc, post_prologue_pc);
933 /* Can't determine prologue from the symbol table, need to examine
934 instructions. */
936 /* Find an upper limit on the function prologue using the debug
937 information. If the debug information could not be used to
938 provide that bound, then use an arbitrary large number as the
939 upper bound. */
940 limit_pc = skip_prologue_using_sal (gdbarch, pc);
941 if (limit_pc == 0)
942 limit_pc = pc + 128; /* Magic. */
944 /* Try disassembling prologue. */
945 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  If no function bounds can be found, fall back to a
   minimal frame layout based on the frame pointer register.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      /* Never scan past the point of execution.  */
      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      /* Assume the standard frame layout: fp/lr pair saved at the
	 frame pointer, 16-byte frame.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].set_addr (0);
      cache->saved_regs[30].set_addr (8);
    }
}
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  /* framereg == -1 means the scan could not identify a frame.  */
  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (cache->saved_regs[reg].is_addr ())
      cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
				       + cache->prev_sp);

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this is if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      /* "Not available" just leaves available_p unset; anything else
	 is a real error and must propagate.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}
/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  gdbarch *arch = get_frame_arch (this_frame);
  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (arch);
  if (cache->prev_pc <= tdep->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}
1090 /* Our frame ID for a normal frame is the current function's starting
1091 PC and the caller's SP when we were called. */
1093 static void
1094 aarch64_prologue_this_id (struct frame_info *this_frame,
1095 void **this_cache, struct frame_id *this_id)
1097 struct aarch64_prologue_cache *cache
1098 = aarch64_make_prologue_cache (this_frame, this_cache);
1100 if (!cache->available_p)
1101 *this_id = frame_id_build_unavailable_stack (cache->func);
1102 else
1103 *this_id = frame_id_build (cache->prev_sp, cache->func);
1106 /* Implement the "prev_register" frame_unwind method. */
1108 static struct value *
1109 aarch64_prologue_prev_register (struct frame_info *this_frame,
1110 void **this_cache, int prev_regnum)
1112 struct aarch64_prologue_cache *cache
1113 = aarch64_make_prologue_cache (this_frame, this_cache);
1115 /* If we are asked to unwind the PC, then we need to return the LR
1116 instead. The prologue may save PC, but it will point into this
1117 frame's prologue, not the next frame's resume location. */
1118 if (prev_regnum == AARCH64_PC_REGNUM)
1120 CORE_ADDR lr;
1121 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1122 aarch64_gdbarch_tdep *tdep
1123 = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
1125 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1127 if (tdep->has_pauth ()
1128 && cache->saved_regs[tdep->ra_sign_state_regnum].is_value ())
1129 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
1131 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
1134 /* SP is generally not saved to the stack, but this frame is
1135 identified by the next frame's stack pointer at the time of the
1136 call. The value was already reconstructed into PREV_SP. */
1138 +----------+ ^
1139 | saved lr | |
1140 +->| saved fp |--+
1141 | | |
1142 | | | <- Previous SP
1143 | +----------+
1144 | | saved lr |
1145 +--| saved fp |<- FP
1147 | |<- SP
1148 +----------+ */
1149 if (prev_regnum == AARCH64_SP_REGNUM)
1150 return frame_unwind_got_constant (this_frame, prev_regnum,
1151 cache->prev_sp);
1153 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1154 prev_regnum);
1157 /* AArch64 prologue unwinder. */
1158 static frame_unwind aarch64_prologue_unwind =
1160 "aarch64 prologue",
1161 NORMAL_FRAME,
1162 aarch64_prologue_frame_unwind_stop_reason,
1163 aarch64_prologue_this_id,
1164 aarch64_prologue_prev_register,
1165 NULL,
1166 default_frame_sniffer
1169 /* Allocate and fill in *THIS_CACHE with information about the prologue of
1170 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1171 Return a pointer to the current aarch64_prologue_cache in
1172 *THIS_CACHE. */
1174 static struct aarch64_prologue_cache *
1175 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
1177 struct aarch64_prologue_cache *cache;
1179 if (*this_cache != NULL)
1180 return (struct aarch64_prologue_cache *) *this_cache;
1182 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1183 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1184 *this_cache = cache;
1188 cache->prev_sp = get_frame_register_unsigned (this_frame,
1189 AARCH64_SP_REGNUM);
1190 cache->prev_pc = get_frame_pc (this_frame);
1191 cache->available_p = 1;
1193 catch (const gdb_exception_error &ex)
1195 if (ex.error != NOT_AVAILABLE_ERROR)
1196 throw;
1199 return cache;
1202 /* Implement the "stop_reason" frame_unwind method. */
1204 static enum unwind_stop_reason
1205 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
1206 void **this_cache)
1208 struct aarch64_prologue_cache *cache
1209 = aarch64_make_stub_cache (this_frame, this_cache);
1211 if (!cache->available_p)
1212 return UNWIND_UNAVAILABLE;
1214 return UNWIND_NO_REASON;
1217 /* Our frame ID for a stub frame is the current SP and LR. */
1219 static void
1220 aarch64_stub_this_id (struct frame_info *this_frame,
1221 void **this_cache, struct frame_id *this_id)
1223 struct aarch64_prologue_cache *cache
1224 = aarch64_make_stub_cache (this_frame, this_cache);
1226 if (cache->available_p)
1227 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1228 else
1229 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
1232 /* Implement the "sniffer" frame_unwind method. */
1234 static int
1235 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1236 struct frame_info *this_frame,
1237 void **this_prologue_cache)
1239 CORE_ADDR addr_in_block;
1240 gdb_byte dummy[4];
1242 addr_in_block = get_frame_address_in_block (this_frame);
1243 if (in_plt_section (addr_in_block)
1244 /* We also use the stub winder if the target memory is unreadable
1245 to avoid having the prologue unwinder trying to read it. */
1246 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1247 return 1;
1249 return 0;
1252 /* AArch64 stub unwinder. */
1253 static frame_unwind aarch64_stub_unwind =
1255 "aarch64 stub",
1256 NORMAL_FRAME,
1257 aarch64_stub_frame_unwind_stop_reason,
1258 aarch64_stub_this_id,
1259 aarch64_prologue_prev_register,
1260 NULL,
1261 aarch64_stub_unwind_sniffer
1264 /* Return the frame base address of *THIS_FRAME. */
1266 static CORE_ADDR
1267 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1269 struct aarch64_prologue_cache *cache
1270 = aarch64_make_prologue_cache (this_frame, this_cache);
1272 return cache->prev_sp - cache->framesize;
1275 /* AArch64 default frame base information. */
1276 static frame_base aarch64_normal_base =
1278 &aarch64_prologue_unwind,
1279 aarch64_normal_frame_base,
1280 aarch64_normal_frame_base,
1281 aarch64_normal_frame_base
1284 /* Return the value of the REGNUM register in the previous frame of
1285 *THIS_FRAME. */
1287 static struct value *
1288 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1289 void **this_cache, int regnum)
1291 gdbarch *arch = get_frame_arch (this_frame);
1292 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (arch);
1293 CORE_ADDR lr;
1295 switch (regnum)
1297 case AARCH64_PC_REGNUM:
1298 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1299 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
1300 return frame_unwind_got_constant (this_frame, regnum, lr);
1302 default:
1303 internal_error (__FILE__, __LINE__,
1304 _("Unexpected register %d"), regnum);
1308 static const unsigned char op_lit0 = DW_OP_lit0;
1309 static const unsigned char op_lit1 = DW_OP_lit1;
1311 /* Implement the "init_reg" dwarf2_frame_ops method. */
1313 static void
1314 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1315 struct dwarf2_frame_state_reg *reg,
1316 struct frame_info *this_frame)
1318 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
1320 switch (regnum)
1322 case AARCH64_PC_REGNUM:
1323 reg->how = DWARF2_FRAME_REG_FN;
1324 reg->loc.fn = aarch64_dwarf2_prev_register;
1325 return;
1327 case AARCH64_SP_REGNUM:
1328 reg->how = DWARF2_FRAME_REG_CFA;
1329 return;
1332 /* Init pauth registers. */
1333 if (tdep->has_pauth ())
1335 if (regnum == tdep->ra_sign_state_regnum)
1337 /* Initialize RA_STATE to zero. */
1338 reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
1339 reg->loc.exp.start = &op_lit0;
1340 reg->loc.exp.len = 1;
1341 return;
1343 else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
1344 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
1346 reg->how = DWARF2_FRAME_REG_SAME_VALUE;
1347 return;
1352 /* Implement the execute_dwarf_cfa_vendor_op method. */
1354 static bool
1355 aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
1356 struct dwarf2_frame_state *fs)
1358 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
1359 struct dwarf2_frame_state_reg *ra_state;
1361 if (op == DW_CFA_AARCH64_negate_ra_state)
1363 /* On systems without pauth, treat as a nop. */
1364 if (!tdep->has_pauth ())
1365 return true;
1367 /* Allocate RA_STATE column if it's not allocated yet. */
1368 fs->regs.alloc_regs (AARCH64_DWARF_RA_SIGN_STATE + 1);
1370 /* Toggle the status of RA_STATE between 0 and 1. */
1371 ra_state = &(fs->regs.reg[AARCH64_DWARF_RA_SIGN_STATE]);
1372 ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
1374 if (ra_state->loc.exp.start == nullptr
1375 || ra_state->loc.exp.start == &op_lit0)
1376 ra_state->loc.exp.start = &op_lit1;
1377 else
1378 ra_state->loc.exp.start = &op_lit0;
1380 ra_state->loc.exp.len = 1;
1382 return true;
1385 return false;
/* Used for matching BRK instructions for AArch64.  The mask ignores
   the 16-bit immediate field, so any BRK #imm matches the base.  */
static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;
1392 /* Implementation of gdbarch_program_breakpoint_here_p for aarch64. */
1394 static bool
1395 aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
1397 const uint32_t insn_len = 4;
1398 gdb_byte target_mem[4];
1400 /* Enable the automatic memory restoration from breakpoints while
1401 we read the memory. Otherwise we may find temporary breakpoints, ones
1402 inserted by GDB, and flag them as permanent breakpoints. */
1403 scoped_restore restore_memory
1404 = make_scoped_restore_show_memory_breakpoints (0);
1406 if (target_read_memory (address, target_mem, insn_len) == 0)
1408 uint32_t insn =
1409 (uint32_t) extract_unsigned_integer (target_mem, insn_len,
1410 gdbarch_byte_order_for_code (gdbarch));
1412 /* Check if INSN is a BRK instruction pattern. There are multiple choices
1413 of such instructions with different immediate values. Different OS'
1414 may use a different variation, but they have the same outcome. */
1415 return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
1418 return false;
1421 /* When arguments must be pushed onto the stack, they go on in reverse
1422 order. The code below implements a FILO (stack) to do this. */
1424 struct stack_item_t
1426 /* Value to pass on stack. It can be NULL if this item is for stack
1427 padding. */
1428 const gdb_byte *data;
1430 /* Size in bytes of value to pass on stack. */
1431 int len;
1434 /* Implement the gdbarch type alignment method, overrides the generic
1435 alignment algorithm for anything that is aarch64 specific. */
1437 static ULONGEST
1438 aarch64_type_align (gdbarch *gdbarch, struct type *t)
1440 t = check_typedef (t);
1441 if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
1443 /* Use the natural alignment for vector types (the same for
1444 scalar type), but the maximum alignment is 128-bit. */
1445 if (TYPE_LENGTH (t) > 16)
1446 return 16;
1447 else
1448 return TYPE_LENGTH (t);
1451 /* Allow the common code to calculate the alignment. */
1452 return 0;
1455 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
1457 Return the number of register required, or -1 on failure.
1459 When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
1460 to the element, else fail if the type of this element does not match the
1461 existing value. */
1463 static int
1464 aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
1465 struct type **fundamental_type)
1467 if (type == nullptr)
1468 return -1;
1470 switch (type->code ())
1472 case TYPE_CODE_FLT:
1473 case TYPE_CODE_DECFLOAT:
1474 if (TYPE_LENGTH (type) > 16)
1475 return -1;
1477 if (*fundamental_type == nullptr)
1478 *fundamental_type = type;
1479 else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1480 || type->code () != (*fundamental_type)->code ())
1481 return -1;
1483 return 1;
1485 case TYPE_CODE_COMPLEX:
1487 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1488 if (TYPE_LENGTH (target_type) > 16)
1489 return -1;
1491 if (*fundamental_type == nullptr)
1492 *fundamental_type = target_type;
1493 else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
1494 || target_type->code () != (*fundamental_type)->code ())
1495 return -1;
1497 return 2;
1500 case TYPE_CODE_ARRAY:
1502 if (type->is_vector ())
1504 if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
1505 return -1;
1507 if (*fundamental_type == nullptr)
1508 *fundamental_type = type;
1509 else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1510 || type->code () != (*fundamental_type)->code ())
1511 return -1;
1513 return 1;
1515 else
1517 struct type *target_type = TYPE_TARGET_TYPE (type);
1518 int count = aapcs_is_vfp_call_or_return_candidate_1
1519 (target_type, fundamental_type);
1521 if (count == -1)
1522 return count;
1524 count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
1525 return count;
1529 case TYPE_CODE_STRUCT:
1530 case TYPE_CODE_UNION:
1532 int count = 0;
1534 for (int i = 0; i < type->num_fields (); i++)
1536 /* Ignore any static fields. */
1537 if (field_is_static (&type->field (i)))
1538 continue;
1540 struct type *member = check_typedef (type->field (i).type ());
1542 int sub_count = aapcs_is_vfp_call_or_return_candidate_1
1543 (member, fundamental_type);
1544 if (sub_count == -1)
1545 return -1;
1546 count += sub_count;
1549 /* Ensure there is no padding between the fields (allowing for empty
1550 zero length structs) */
1551 int ftype_length = (*fundamental_type == nullptr)
1552 ? 0 : TYPE_LENGTH (*fundamental_type);
1553 if (count * ftype_length != TYPE_LENGTH (type))
1554 return -1;
1556 return count;
1559 default:
1560 break;
1563 return -1;
1566 /* Return true if an argument, whose type is described by TYPE, can be passed or
1567 returned in simd/fp registers, providing enough parameter passing registers
1568 are available. This is as described in the AAPCS64.
1570 Upon successful return, *COUNT returns the number of needed registers,
1571 *FUNDAMENTAL_TYPE contains the type of those registers.
1573 Candidate as per the AAPCS64 5.4.2.C is either a:
1574 - float.
1575 - short-vector.
1576 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1577 all the members are floats and has at most 4 members.
1578 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1579 all the members are short vectors and has at most 4 members.
1580 - Complex (7.1.1)
1582 Note that HFAs and HVAs can include nested structures and arrays. */
1584 static bool
1585 aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
1586 struct type **fundamental_type)
1588 if (type == nullptr)
1589 return false;
1591 *fundamental_type = nullptr;
1593 int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
1594 fundamental_type);
1596 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
1598 *count = ag_count;
1599 return true;
1601 else
1602 return false;
1605 /* AArch64 function call information structure. */
1606 struct aarch64_call_info
1608 /* the current argument number. */
1609 unsigned argnum = 0;
1611 /* The next general purpose register number, equivalent to NGRN as
1612 described in the AArch64 Procedure Call Standard. */
1613 unsigned ngrn = 0;
1615 /* The next SIMD and floating point register number, equivalent to
1616 NSRN as described in the AArch64 Procedure Call Standard. */
1617 unsigned nsrn = 0;
1619 /* The next stacked argument address, equivalent to NSAA as
1620 described in the AArch64 Procedure Call Standard. */
1621 unsigned nsaa = 0;
1623 /* Stack item vector. */
1624 std::vector<stack_item_t> si;
1627 /* Pass a value in a sequence of consecutive X registers. The caller
1628 is responsible for ensuring sufficient registers are available. */
1630 static void
1631 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1632 struct aarch64_call_info *info, struct type *type,
1633 struct value *arg)
1635 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1636 int len = TYPE_LENGTH (type);
1637 enum type_code typecode = type->code ();
1638 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1639 const bfd_byte *buf = value_contents (arg).data ();
1641 info->argnum++;
1643 while (len > 0)
1645 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1646 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1647 byte_order);
1650 /* Adjust sub-word struct/union args when big-endian. */
1651 if (byte_order == BFD_ENDIAN_BIG
1652 && partial_len < X_REGISTER_SIZE
1653 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1654 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1656 aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
1657 gdbarch_register_name (gdbarch, regnum),
1658 phex (regval, X_REGISTER_SIZE));
1660 regcache_cooked_write_unsigned (regcache, regnum, regval);
1661 len -= partial_len;
1662 buf += partial_len;
1663 regnum++;
1667 /* Attempt to marshall a value in a V register. Return 1 if
1668 successful, or 0 if insufficient registers are available. This
1669 function, unlike the equivalent pass_in_x() function does not
1670 handle arguments spread across multiple registers. */
1672 static int
1673 pass_in_v (struct gdbarch *gdbarch,
1674 struct regcache *regcache,
1675 struct aarch64_call_info *info,
1676 int len, const bfd_byte *buf)
1678 if (info->nsrn < 8)
1680 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1681 /* Enough space for a full vector register. */
1682 gdb_byte reg[register_size (gdbarch, regnum)];
1683 gdb_assert (len <= sizeof (reg));
1685 info->argnum++;
1686 info->nsrn++;
1688 memset (reg, 0, sizeof (reg));
1689 /* PCS C.1, the argument is allocated to the least significant
1690 bits of V register. */
1691 memcpy (reg, buf, len);
1692 regcache->cooked_write (regnum, reg);
1694 aarch64_debug_printf ("arg %d in %s", info->argnum,
1695 gdbarch_register_name (gdbarch, regnum));
1697 return 1;
1699 info->nsrn = 8;
1700 return 0;
1703 /* Marshall an argument onto the stack. */
1705 static void
1706 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1707 struct value *arg)
1709 const bfd_byte *buf = value_contents (arg).data ();
1710 int len = TYPE_LENGTH (type);
1711 int align;
1712 stack_item_t item;
1714 info->argnum++;
1716 align = type_align (type);
1718 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1719 Natural alignment of the argument's type. */
1720 align = align_up (align, 8);
1722 /* The AArch64 PCS requires at most doubleword alignment. */
1723 if (align > 16)
1724 align = 16;
1726 aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1727 info->nsaa);
1729 item.len = len;
1730 item.data = buf;
1731 info->si.push_back (item);
1733 info->nsaa += len;
1734 if (info->nsaa & (align - 1))
1736 /* Push stack alignment padding. */
1737 int pad = align - (info->nsaa & (align - 1));
1739 item.len = pad;
1740 item.data = NULL;
1742 info->si.push_back (item);
1743 info->nsaa += pad;
1747 /* Marshall an argument into a sequence of one or more consecutive X
1748 registers or, if insufficient X registers are available then onto
1749 the stack. */
1751 static void
1752 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1753 struct aarch64_call_info *info, struct type *type,
1754 struct value *arg)
1756 int len = TYPE_LENGTH (type);
1757 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1759 /* PCS C.13 - Pass in registers if we have enough spare */
1760 if (info->ngrn + nregs <= 8)
1762 pass_in_x (gdbarch, regcache, info, type, arg);
1763 info->ngrn += nregs;
1765 else
1767 info->ngrn = 8;
1768 pass_on_stack (info, type, arg);
1772 /* Pass a value, which is of type arg_type, in a V register. Assumes value is a
1773 aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1774 registers. A return value of false is an error state as the value will have
1775 been partially passed to the stack. */
1776 static bool
1777 pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
1778 struct aarch64_call_info *info, struct type *arg_type,
1779 struct value *arg)
1781 switch (arg_type->code ())
1783 case TYPE_CODE_FLT:
1784 case TYPE_CODE_DECFLOAT:
1785 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1786 value_contents (arg).data ());
1787 break;
1789 case TYPE_CODE_COMPLEX:
1791 const bfd_byte *buf = value_contents (arg).data ();
1792 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));
1794 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1795 buf))
1796 return false;
1798 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1799 buf + TYPE_LENGTH (target_type));
1802 case TYPE_CODE_ARRAY:
1803 if (arg_type->is_vector ())
1804 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1805 value_contents (arg).data ());
1806 /* fall through. */
1808 case TYPE_CODE_STRUCT:
1809 case TYPE_CODE_UNION:
1810 for (int i = 0; i < arg_type->num_fields (); i++)
1812 /* Don't include static fields. */
1813 if (field_is_static (&arg_type->field (i)))
1814 continue;
1816 struct value *field = value_primitive_field (arg, 0, i, arg_type);
1817 struct type *field_type = check_typedef (value_type (field));
1819 if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
1820 field))
1821 return false;
1823 return true;
1825 default:
1826 return false;
1830 /* Implement the "push_dummy_call" gdbarch method. */
1832 static CORE_ADDR
1833 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1834 struct regcache *regcache, CORE_ADDR bp_addr,
1835 int nargs,
1836 struct value **args, CORE_ADDR sp,
1837 function_call_return_method return_method,
1838 CORE_ADDR struct_addr)
1840 int argnum;
1841 struct aarch64_call_info info;
1843 /* We need to know what the type of the called function is in order
1844 to determine the number of named/anonymous arguments for the
1845 actual argument placement, and the return type in order to handle
1846 return value correctly.
1848 The generic code above us views the decision of return in memory
1849 or return in registers as a two stage processes. The language
1850 handler is consulted first and may decide to return in memory (eg
1851 class with copy constructor returned by value), this will cause
1852 the generic code to allocate space AND insert an initial leading
1853 argument.
1855 If the language code does not decide to pass in memory then the
1856 target code is consulted.
1858 If the language code decides to pass in memory we want to move
1859 the pointer inserted as the initial argument from the argument
1860 list and into X8, the conventional AArch64 struct return pointer
1861 register. */
1863 /* Set the return address. For the AArch64, the return breakpoint
1864 is always at BP_ADDR. */
1865 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1867 /* If we were given an initial argument for the return slot, lose it. */
1868 if (return_method == return_method_hidden_param)
1870 args++;
1871 nargs--;
1874 /* The struct_return pointer occupies X8. */
1875 if (return_method != return_method_normal)
1877 aarch64_debug_printf ("struct return in %s = 0x%s",
1878 gdbarch_register_name
1879 (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
1880 paddress (gdbarch, struct_addr));
1882 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1883 struct_addr);
1886 for (argnum = 0; argnum < nargs; argnum++)
1888 struct value *arg = args[argnum];
1889 struct type *arg_type, *fundamental_type;
1890 int len, elements;
1892 arg_type = check_typedef (value_type (arg));
1893 len = TYPE_LENGTH (arg_type);
1895 /* If arg can be passed in v registers as per the AAPCS64, then do so if
1896 if there are enough spare registers. */
1897 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1898 &fundamental_type))
1900 if (info.nsrn + elements <= 8)
1902 /* We know that we have sufficient registers available therefore
1903 this will never need to fallback to the stack. */
1904 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1905 arg))
1906 gdb_assert_not_reached ("Failed to push args");
1908 else
1910 info.nsrn = 8;
1911 pass_on_stack (&info, arg_type, arg);
1913 continue;
1916 switch (arg_type->code ())
1918 case TYPE_CODE_INT:
1919 case TYPE_CODE_BOOL:
1920 case TYPE_CODE_CHAR:
1921 case TYPE_CODE_RANGE:
1922 case TYPE_CODE_ENUM:
1923 if (len < 4 && !is_fixed_point_type (arg_type))
1925 /* Promote to 32 bit integer. */
1926 if (arg_type->is_unsigned ())
1927 arg_type = builtin_type (gdbarch)->builtin_uint32;
1928 else
1929 arg_type = builtin_type (gdbarch)->builtin_int32;
1930 arg = value_cast (arg_type, arg);
1932 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1933 break;
1935 case TYPE_CODE_STRUCT:
1936 case TYPE_CODE_ARRAY:
1937 case TYPE_CODE_UNION:
1938 if (len > 16)
1940 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1941 invisible reference. */
1943 /* Allocate aligned storage. */
1944 sp = align_down (sp - len, 16);
1946 /* Write the real data into the stack. */
1947 write_memory (sp, value_contents (arg).data (), len);
1949 /* Construct the indirection. */
1950 arg_type = lookup_pointer_type (arg_type);
1951 arg = value_from_pointer (arg_type, sp);
1952 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1954 else
1955 /* PCS C.15 / C.18 multiple values pass. */
1956 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1957 break;
1959 default:
1960 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1961 break;
1965 /* Make sure stack retains 16 byte alignment. */
1966 if (info.nsaa & 15)
1967 sp -= 16 - (info.nsaa & 15);
1969 while (!info.si.empty ())
1971 const stack_item_t &si = info.si.back ();
1973 sp -= si.len;
1974 if (si.data != NULL)
1975 write_memory (sp, si.data, si.len);
1976 info.si.pop_back ();
1979 /* Finally, update the SP register. */
1980 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1982 return sp;
1985 /* Implement the "frame_align" gdbarch method. */
1987 static CORE_ADDR
1988 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1990 /* Align the stack to sixteen bytes. */
1991 return sp & ~(CORE_ADDR) 15;
1994 /* Return the type for an AdvSISD Q register. */
1996 static struct type *
1997 aarch64_vnq_type (struct gdbarch *gdbarch)
1999 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2001 if (tdep->vnq_type == NULL)
2003 struct type *t;
2004 struct type *elem;
2006 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2007 TYPE_CODE_UNION);
2009 elem = builtin_type (gdbarch)->builtin_uint128;
2010 append_composite_type_field (t, "u", elem);
2012 elem = builtin_type (gdbarch)->builtin_int128;
2013 append_composite_type_field (t, "s", elem);
2015 tdep->vnq_type = t;
2018 return tdep->vnq_type;
2021 /* Return the type for an AdvSISD D register. */
2023 static struct type *
2024 aarch64_vnd_type (struct gdbarch *gdbarch)
2026 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2028 if (tdep->vnd_type == NULL)
2030 struct type *t;
2031 struct type *elem;
2033 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2034 TYPE_CODE_UNION);
2036 elem = builtin_type (gdbarch)->builtin_double;
2037 append_composite_type_field (t, "f", elem);
2039 elem = builtin_type (gdbarch)->builtin_uint64;
2040 append_composite_type_field (t, "u", elem);
2042 elem = builtin_type (gdbarch)->builtin_int64;
2043 append_composite_type_field (t, "s", elem);
2045 tdep->vnd_type = t;
2048 return tdep->vnd_type;
2051 /* Return the type for an AdvSISD S register. */
2053 static struct type *
2054 aarch64_vns_type (struct gdbarch *gdbarch)
2056 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2058 if (tdep->vns_type == NULL)
2060 struct type *t;
2061 struct type *elem;
2063 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2064 TYPE_CODE_UNION);
2066 elem = builtin_type (gdbarch)->builtin_float;
2067 append_composite_type_field (t, "f", elem);
2069 elem = builtin_type (gdbarch)->builtin_uint32;
2070 append_composite_type_field (t, "u", elem);
2072 elem = builtin_type (gdbarch)->builtin_int32;
2073 append_composite_type_field (t, "s", elem);
2075 tdep->vns_type = t;
2078 return tdep->vns_type;
2081 /* Return the type for an AdvSISD H register. */
2083 static struct type *
2084 aarch64_vnh_type (struct gdbarch *gdbarch)
2086 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2088 if (tdep->vnh_type == NULL)
2090 struct type *t;
2091 struct type *elem;
2093 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2094 TYPE_CODE_UNION);
2096 elem = builtin_type (gdbarch)->builtin_bfloat16;
2097 append_composite_type_field (t, "bf", elem);
2099 elem = builtin_type (gdbarch)->builtin_half;
2100 append_composite_type_field (t, "f", elem);
2102 elem = builtin_type (gdbarch)->builtin_uint16;
2103 append_composite_type_field (t, "u", elem);
2105 elem = builtin_type (gdbarch)->builtin_int16;
2106 append_composite_type_field (t, "s", elem);
2108 tdep->vnh_type = t;
2111 return tdep->vnh_type;
2114 /* Return the type for an AdvSISD B register. */
2116 static struct type *
2117 aarch64_vnb_type (struct gdbarch *gdbarch)
2119 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2121 if (tdep->vnb_type == NULL)
2123 struct type *t;
2124 struct type *elem;
2126 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2127 TYPE_CODE_UNION);
2129 elem = builtin_type (gdbarch)->builtin_uint8;
2130 append_composite_type_field (t, "u", elem);
2132 elem = builtin_type (gdbarch)->builtin_int8;
2133 append_composite_type_field (t, "s", elem);
2135 tdep->vnb_type = t;
2138 return tdep->vnb_type;
2141 /* Return the type for an AdvSISD V register. */
2143 static struct type *
2144 aarch64_vnv_type (struct gdbarch *gdbarch)
2146 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2148 if (tdep->vnv_type == NULL)
2150 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
2151 slice from the non-pseudo vector registers. However NEON V registers
2152 are always vector registers, and need constructing as such. */
2153 const struct builtin_type *bt = builtin_type (gdbarch);
2155 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
2156 TYPE_CODE_UNION);
2158 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2159 TYPE_CODE_UNION);
2160 append_composite_type_field (sub, "f",
2161 init_vector_type (bt->builtin_double, 2));
2162 append_composite_type_field (sub, "u",
2163 init_vector_type (bt->builtin_uint64, 2));
2164 append_composite_type_field (sub, "s",
2165 init_vector_type (bt->builtin_int64, 2));
2166 append_composite_type_field (t, "d", sub);
2168 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2169 TYPE_CODE_UNION);
2170 append_composite_type_field (sub, "f",
2171 init_vector_type (bt->builtin_float, 4));
2172 append_composite_type_field (sub, "u",
2173 init_vector_type (bt->builtin_uint32, 4));
2174 append_composite_type_field (sub, "s",
2175 init_vector_type (bt->builtin_int32, 4));
2176 append_composite_type_field (t, "s", sub);
2178 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2179 TYPE_CODE_UNION);
2180 append_composite_type_field (sub, "bf",
2181 init_vector_type (bt->builtin_bfloat16, 8));
2182 append_composite_type_field (sub, "f",
2183 init_vector_type (bt->builtin_half, 8));
2184 append_composite_type_field (sub, "u",
2185 init_vector_type (bt->builtin_uint16, 8));
2186 append_composite_type_field (sub, "s",
2187 init_vector_type (bt->builtin_int16, 8));
2188 append_composite_type_field (t, "h", sub);
2190 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2191 TYPE_CODE_UNION);
2192 append_composite_type_field (sub, "u",
2193 init_vector_type (bt->builtin_uint8, 16));
2194 append_composite_type_field (sub, "s",
2195 init_vector_type (bt->builtin_int8, 16));
2196 append_composite_type_field (t, "b", sub);
2198 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2199 TYPE_CODE_UNION);
2200 append_composite_type_field (sub, "u",
2201 init_vector_type (bt->builtin_uint128, 1));
2202 append_composite_type_field (sub, "s",
2203 init_vector_type (bt->builtin_int128, 1));
2204 append_composite_type_field (t, "q", sub);
2206 tdep->vnv_type = t;
2209 return tdep->vnv_type;
2212 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2214 static int
2215 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2217 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2219 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2220 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2222 if (reg == AARCH64_DWARF_SP)
2223 return AARCH64_SP_REGNUM;
2225 if (reg == AARCH64_DWARF_PC)
2226 return AARCH64_PC_REGNUM;
2228 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2229 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2231 if (reg == AARCH64_DWARF_SVE_VG)
2232 return AARCH64_SVE_VG_REGNUM;
2234 if (reg == AARCH64_DWARF_SVE_FFR)
2235 return AARCH64_SVE_FFR_REGNUM;
2237 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2238 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2240 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2241 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2243 if (tdep->has_pauth ())
2245 if (reg == AARCH64_DWARF_RA_SIGN_STATE)
2246 return tdep->ra_sign_state_regnum;
2249 return -1;
2252 /* Implement the "print_insn" gdbarch method. */
2254 static int
2255 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2257 info->symbols = NULL;
2258 return default_print_insn (memaddr, info);
/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Standard breakpoint-kind helpers for the fixed-size BRK instruction
   above.  */
typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.

   Follows the AAPCS64 result-return rules visible below:
   - HFA/HVA candidates are read from consecutive V registers, one
     element per register;
   - integer-like scalars are read from X0 onwards;
   - any other aggregate returned in registers is copied from
     consecutive X registers as if loaded from memory.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
			      gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      /* Homogeneous aggregate: copy LEN bytes (one member) from each
	 of ELEMENTS consecutive V registers.  */
      int len = TYPE_LENGTH (fundamental_type);

      for (int i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* Enough space for a full vector register.  */
	  gdb_byte buf[register_size (gdbarch, regno)];
	  gdb_assert (len <= sizeof (buf));

	  aarch64_debug_printf
	    ("read HFA or HVA return value element %d from %s",
	     i + 1, gdbarch_register_name (gdbarch, regno));

	  regs->cooked_read (regno, buf);

	  memcpy (valbuf, buf, len);
	  valbuf += len;
	}
    }
  else if (type->code () == TYPE_CODE_INT
	   || type->code () == TYPE_CODE_CHAR
	   || type->code () == TYPE_CODE_BOOL
	   || type->code () == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || type->code () == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > X_REGISTER_SIZE
				   ? X_REGISTER_SIZE : len), byte_order, tmp);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  regs->cooked_read (regno++, buf);
	  /* The final register may hold fewer than 8 meaningful bytes.  */
	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2349 /* Will a function return an aggregate type in memory or in a
2350 register? Return 0 if an aggregate type can be returned in a
2351 register, 1 if it must be returned in memory. */
2353 static int
2354 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2356 type = check_typedef (type);
2357 int elements;
2358 struct type *fundamental_type;
2360 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2361 &fundamental_type))
2363 /* v0-v7 are used to return values and one register is allocated
2364 for one member. However, HFA or HVA has at most four members. */
2365 return 0;
2368 if (TYPE_LENGTH (type) > 16
2369 || !language_pass_by_reference (type).trivially_copyable)
2371 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2372 invisible reference. */
2374 return 1;
2377 return 0;
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.

   Mirror of aarch64_extract_return_value: HFA/HVA members go into
   consecutive V registers, integer-like scalars into X0 (and
   following), and other register-sized aggregates into consecutive X
   registers.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      /* Homogeneous aggregate: one member per V register.  */
      int len = TYPE_LENGTH (fundamental_type);

      for (int i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* Enough space for a full vector register.  */
	  gdb_byte tmpbuf[register_size (gdbarch, regno)];
	  gdb_assert (len <= sizeof (tmpbuf));

	  aarch64_debug_printf
	    ("write HFA or HVA return value element %d to %s",
	     i + 1, gdbarch_register_name (gdbarch, regno));

	  memcpy (tmpbuf, valbuf,
		  len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
	  regs->cooked_write (regno, tmpbuf);
	  valbuf += len;
	}
    }
  else if (type->code () == TYPE_CODE_INT
	   || type->code () == TYPE_CODE_CHAR
	   || type->code () == TYPE_CODE_BOOL
	   || type->code () == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || type->code () == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with r0.  This will always
	     be a multiple of the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regs->cooked_write (regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regs->cooked_write (regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
/* Implement the "return_value" gdbarch method.

   Dispatch between the in-memory convention (large or non-trivially
   copyable aggregates, whose address is passed in x8) and the
   in-register convention handled by aarch64_store_return_value /
   aarch64_extract_return_value.  */

static enum return_value_convention
aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
		      struct type *valtype, struct regcache *regcache,
		      gdb_byte *readbuf, const gdb_byte *writebuf)
{
  if (valtype->code () == TYPE_CODE_STRUCT
      || valtype->code () == TYPE_CODE_UNION
      || valtype->code () == TYPE_CODE_ARRAY)
    {
      if (aarch64_return_in_memory (gdbarch, valtype))
	{
	  /* From the AAPCS64's Result Return section:

	     "Otherwise, the caller shall reserve a block of memory of
	     sufficient size and alignment to hold the result.  The address
	     of the memory block shall be passed as an additional argument to
	     the function in x8.  */

	  aarch64_debug_printf ("return value in memory");

	  if (readbuf)
	    {
	      CORE_ADDR addr;

	      /* Fetch the result from the memory block x8 pointed at.  */
	      regcache->cooked_read (AARCH64_STRUCT_RETURN_REGNUM, &addr);
	      read_memory (addr, readbuf, TYPE_LENGTH (valtype));
	    }

	  return RETURN_VALUE_ABI_RETURNS_ADDRESS;
	}
    }

  if (writebuf)
    aarch64_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    aarch64_extract_return_value (valtype, regcache, readbuf);

  aarch64_debug_printf ("return value in registers");

  return RETURN_VALUE_REGISTER_CONVENTION;
}
/* Implement the "get_longjmp_target" gdbarch method.

   Extract the longjmp resume PC from the jmp_buf whose address is in
   X0.  The PC slot is at element index tdep->jb_pc (elements are
   tdep->jb_elt_size bytes).  Return 1 and set *PC on success, 0 if the
   jmp_buf memory cannot be read.  */

static int
aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  CORE_ADDR jb_addr;
  gdb_byte buf[X_REGISTER_SIZE];
  struct gdbarch *gdbarch = get_frame_arch (frame);
  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* First argument register holds the jmp_buf pointer.  */
  jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);

  if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
			  X_REGISTER_SIZE))
    return 0;

  *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
  return 1;
}
2534 /* Implement the "gen_return_address" gdbarch method. */
2536 static void
2537 aarch64_gen_return_address (struct gdbarch *gdbarch,
2538 struct agent_expr *ax, struct axs_value *value,
2539 CORE_ADDR scope)
2541 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2542 value->kind = axs_lvalue_register;
2543 value->u.reg = AARCH64_LR_REGNUM;
/* Return the pseudo register name corresponding to register regnum.

   REGNUM is an absolute register number; it is rebased against
   gdbarch_num_regs to index into the q/d/s/h/b (and, with SVE, v)
   name tables.  */

static const char *
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);

  /* 128-bit scalar views of the vector registers.  */
  static const char *const q_name[] =
    {
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",
    };

  /* 64-bit views.  */
  static const char *const d_name[] =
    {
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",
    };

  /* 32-bit views.  */
  static const char *const s_name[] =
    {
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",
    };

  /* 16-bit views.  */
  static const char *const h_name[] =
    {
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",
    };

  /* 8-bit views.  */
  static const char *const b_name[] =
    {
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",
    };

  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[p_regnum - AARCH64_Q0_REGNUM];

  if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return d_name[p_regnum - AARCH64_D0_REGNUM];

  if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return s_name[p_regnum - AARCH64_S0_REGNUM];

  if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return h_name[p_regnum - AARCH64_H0_REGNUM];

  if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return b_name[p_regnum - AARCH64_B0_REGNUM];

  if (tdep->has_sve ())
    {
      /* On SVE targets the NEON V registers become pseudo registers on
	 top of the Z registers.  */
      static const char *const sve_v_name[] =
	{
	  "v0", "v1", "v2", "v3",
	  "v4", "v5", "v6", "v7",
	  "v8", "v9", "v10", "v11",
	  "v12", "v13", "v14", "v15",
	  "v16", "v17", "v18", "v19",
	  "v20", "v21", "v22", "v23",
	  "v24", "v25", "v26", "v27",
	  "v28", "v29", "v30", "v31",
	};

      if (p_regnum >= AARCH64_SVE_V0_REGNUM
	  && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
	return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
    }

  /* RA_STATE is used for unwinding only.  Do not assign it a name - this
     prevents it from being read by methods such as
     mi_cmd_trace_frame_collected.  */
  if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
    return "";

  internal_error (__FILE__, __LINE__,
		  _("aarch64_pseudo_register_name: bad register number %d"),
		  p_regnum);
}
2661 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2663 static struct type *
2664 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2666 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
2668 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2670 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2671 return aarch64_vnq_type (gdbarch);
2673 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2674 return aarch64_vnd_type (gdbarch);
2676 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2677 return aarch64_vns_type (gdbarch);
2679 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2680 return aarch64_vnh_type (gdbarch);
2682 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2683 return aarch64_vnb_type (gdbarch);
2685 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2686 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2687 return aarch64_vnv_type (gdbarch);
2689 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
2690 return builtin_type (gdbarch)->builtin_uint64;
2692 internal_error (__FILE__, __LINE__,
2693 _("aarch64_pseudo_register_type: bad register number %d"),
2694 p_regnum);
/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method.

   Place the q/d/s/h/b (and SVE v) pseudo registers in the vector group
   (d and s additionally in the float group), keep RA_STATE out of all
   groups, and put anything else only in the "all" group.  */

static int
aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
				    const struct reggroup *group)
{
  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);

  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return (group == all_reggroup || group == vector_reggroup
	    || group == float_reggroup);
  else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return (group == all_reggroup || group == vector_reggroup
	    || group == float_reggroup);
  else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
	   && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
    return group == all_reggroup || group == vector_reggroup;
  /* RA_STATE is used for unwinding only.  Do not assign it to any groups.  */
  if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
    return 0;

  return group == all_reggroup;
}
/* Helper for aarch64_pseudo_read_value.  Read the raw V register
   underlying the pseudo register (V0 + REGNUM_OFFSET; on SVE targets
   this is the Z register, since AARCH64_V0_REGNUM equals
   AARCH64_SVE_Z0_REGNUM) and copy its low REGSIZE bytes into
   RESULT_VALUE, which is returned.  If the raw register is
   unavailable, RESULT_VALUE is marked entirely unavailable instead.  */

static struct value *
aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
			     readable_regcache *regcache, int regnum_offset,
			     int regsize, struct value *result_value)
{
  unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;

  /* Enough space for a full vector register.  */
  gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
  gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);

  if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
    mark_value_bytes_unavailable (result_value, 0,
				  TYPE_LENGTH (value_type (result_value)));
  else
    memcpy (value_contents_raw (result_value).data (), reg_buf, regsize);

  return result_value;
}
/* Implement the "pseudo_register_read_value" gdbarch method.

   Rebase REGNUM against gdbarch_num_regs, identify which q/d/s/h/b
   (or SVE v) bank it falls in, and delegate to
   aarch64_pseudo_read_value_1 with the matching slice size.  */

static struct value *
aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
			   int regnum)
{
  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  struct value *result_value = allocate_value (register_type (gdbarch, regnum));

  VALUE_LVAL (result_value) = lval_register;
  VALUE_REGNUM (result_value) = regnum;

  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_Q0_REGNUM,
					Q_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_D0_REGNUM,
					D_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_S0_REGNUM,
					S_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_H0_REGNUM,
					H_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_B0_REGNUM,
					B_REGISTER_SIZE, result_value);

  if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
      && regnum < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_SVE_V0_REGNUM,
					V_REGISTER_SIZE, result_value);

  gdb_assert_not_reached ("regnum out of bound");
}
/* Helper for aarch64_pseudo_write.  Write REGSIZE bytes from BUF into
   the low part of the raw V register underlying the pseudo register
   (V0 + REGNUM_OFFSET; the Z register on SVE targets), zero-filling
   the rest of the register.  */

static void
aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
			int regnum_offset, int regsize, const gdb_byte *buf)
{
  unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;

  /* Enough space for a full vector register.  */
  gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
  gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);

  /* Ensure the register buffer is zero, we want gdb writes of the
     various 'scalar' pseudo registers to behave like architectural
     writes: register width bytes are written, the remainder are set to
     zero.  */
  memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));

  memcpy (reg_buf, buf, regsize);
  regcache->raw_write (v_regnum, reg_buf);
}
/* Implement the "pseudo_register_write" gdbarch method.

   Rebase REGNUM against gdbarch_num_regs, identify its q/d/s/h/b (or
   SVE v) bank, and delegate to aarch64_pseudo_write_1 with the
   matching slice size.  */

static void
aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
		      int regnum, const gdb_byte *buf)
{
  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
				   buf);

  if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
      && regnum < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_SVE_V0_REGNUM,
				   V_REGISTER_SIZE, buf);

  gdb_assert_not_reached ("regnum out of bound");
}
2864 /* Callback function for user_reg_add. */
2866 static struct value *
2867 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2869 const int *reg_p = (const int *) baton;
2871 return value_of_register (*reg_p, frame);
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   If PC sits on a Load Exclusive instruction, scan forward (up to 16
   instructions) for the matching Store Exclusive and return breakpoint
   addresses: one just past the sequence, and optionally one at the
   destination of a single conditional branch found inside it.  Return
   an empty vector when PC is not at the start of such a sequence (the
   caller then falls back to ordinary stepping).  */

static std::vector<CORE_ADDR>
aarch64_software_single_step (struct regcache *regcache)
{
  struct gdbarch *gdbarch = regcache->arch ();
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = regcache_read_pc (regcache);
  CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return {};

  /* Look for a Load Exclusive instruction which begins the sequence.
     Bit 22 set distinguishes the load form of the exclusive class.  */
  if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
    return {};

  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	return {};
      /* Check if the instruction is a conditional branch.  */
      if (inst.opcode->iclass == condbranch)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);

	  /* At most one conditional branch is supported inside the
	     sequence.  */
	  if (bc_insn_count >= 1)
	    return {};

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + inst.operands[0].imm.value;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return {};

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  std::vector<CORE_ADDR> next_pcs;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    next_pcs.push_back (breaks[index]);

  return next_pcs;
}
/* Per-instruction state carried from displaced_step_copy_insn to
   displaced_step_fixup.  */

struct aarch64_displaced_step_copy_insn_closure
  : public displaced_step_copy_insn_closure
{
  /* It is true when condition instruction, such as B.CON, TBZ, etc,
     is being displaced stepping.  */
  bool cond = false;

  /* PC adjustment offset after displaced stepping.  If 0, then we don't
     write the PC back, assuming the PC is already the right address.  */
  int32_t pc_adjust = 0;
};
/* Data when visiting instructions for displaced stepping.  */

struct aarch64_displaced_step_data
{
  /* Common visitor data; base.insn_addr is the original instruction
     address.  */
  struct aarch64_insn_data base;

  /* The address where the instruction will be executed at.  */
  CORE_ADDR new_addr;
  /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
  uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
  /* Number of instructions in INSN_BUF.  */
  unsigned insn_count;
  /* Registers when doing displaced stepping.  */
  struct regcache *regs;

  /* Fix-up state recorded for displaced_step_fixup; owned by caller.  */
  aarch64_displaced_step_copy_insn_closure *dsc;
};
/* Implementation of aarch64_insn_visitor method "b".

   Relocate a B/BL so it executes correctly from DSD->new_addr: emit a
   B with the re-biased offset when it still fits the 26-bit immediate,
   otherwise emit a NOP and let pc_adjust fix up the PC afterwards.
   For BL, LR is written here with the original return address in
   either case.  */

static void
aarch64_displaced_step_b (const int is_bl, const int32_t offset,
			  struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  int64_t new_offset = data->insn_addr - dsd->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    {
      /* Emit B rather than BL, because executing BL on a new address
	 will get the wrong address into LR.  In order to avoid this,
	 we emit B, and update LR if the instruction is BL.  */
      emit_b (dsd->insn_buf, 0, new_offset);
      dsd->insn_count++;
    }
  else
    {
      /* Write NOP.  */
      emit_nop (dsd->insn_buf);
      dsd->insn_count++;
      dsd->dsc->pc_adjust = offset;
    }

  if (is_bl)
    {
      /* Update LR.  */
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
				      data->insn_addr + 4);
    }
}
/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
			       struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* GDB has to fix up PC after displaced step this instruction
     differently according to the condition is true or false.  Instead
     of checking COND against conditional flags, we can use
     the following instructions, and GDB can tell how to fix up PC
     according to the PC value.

     B.COND TAKEN    ; If cond is true, then jump to TAKEN.
     INSN1     ;
     TAKEN:
     INSN2
  */

  /* Branch over a single slot (8 bytes); the fixup phase reads the
     resulting PC to learn whether the condition held.  */
  emit_bcond (dsd->insn_buf, cond, 8);
  dsd->dsc->cond = true;
  dsd->dsc->pc_adjust = offset;
  dsd->insn_count = 1;
}
3050 /* Dynamically allocate a new register. If we know the register
3051 statically, we should make it a global as above instead of using this
3052 helper function. */
3054 static struct aarch64_register
3055 aarch64_register (unsigned num, int is64)
3057 return (struct aarch64_register) { num, is64 };
/* Implementation of aarch64_insn_visitor method "cb".

   Rewrite CBZ/CBNZ as a branch over one instruction slot; the fixup
   phase infers whether the branch was taken from the resulting PC and
   applies OFFSET accordingly.  */

static void
aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
			   const unsigned rn, int is64,
			   struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a compare and branch
     instruction.  We can use the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 INSN1     ;
	 TAKEN:
	 INSN2
  */
  emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
  dsd->insn_count = 1;
  dsd->dsc->cond = true;
  dsd->dsc->pc_adjust = offset;
}
/* Implementation of aarch64_insn_visitor method "tb".

   Rewrite TBZ/TBNZ as a branch over one instruction slot, mirroring
   the "cb" handler above.  */

static void
aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
			   const unsigned rt, unsigned bit,
			   struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a test bit and branch
     instruction We can use the following instructions instead:

     TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
     INSN1         ;
     TAKEN:
     INSN2
  */

  emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
  dsd->insn_count = 1;
  dsd->dsc->cond = true;
  dsd->dsc->pc_adjust = offset;
}
/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
			    const int is_adrp, struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
				      address & ~0xfff);
    }
  else
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
				      address);

  /* The result is already in Rd, so just execute a NOP in the scratch
     pad and step past the original instruction.  */
  dsd->dsc->pc_adjust = 4;
  emit_nop (dsd->insn_buf);
  dsd->insn_count = 1;
}
/* Implementation of aarch64_insn_visitor method "ldr_literal".

   A PC-relative load cannot be executed from the scratch pad, so the
   literal's absolute address is written into Rt first and the load is
   rewritten as a zero-offset register-indirect LDR/LDRSW through Rt.  */

static void
aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
				    const unsigned rt, const int is64,
				    struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  CORE_ADDR address = data->insn_addr + offset;
  struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };

  regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
				  address);

  if (is_sw)
    dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
				  aarch64_register (rt, 1), zero);
  else
    dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
				aarch64_register (rt, 1), zero);

  dsd->dsc->pc_adjust = 4;
}
/* Implementation of aarch64_insn_visitor method "others".

   Copy the instruction unmodified, except that BLR is degraded to BR
   (with LR updated by hand) so the link register receives the original
   return address rather than a scratch-pad one.  For RET/BR/BLR the PC
   is already correct after the step, so no adjustment is recorded.  */

static void
aarch64_displaced_step_others (const uint32_t insn,
			       struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
  if (masked_insn == BLR)
    {
      /* Emit a BR to the same register and then update LR to the original
	 address (similar to aarch64_displaced_step_b).  */
      aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
				      data->insn_addr + 4);
    }
  else
    aarch64_emit_insn (dsd->insn_buf, insn);
  dsd->insn_count = 1;

  if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
    dsd->dsc->pc_adjust = 0;
  else
    dsd->dsc->pc_adjust = 4;
}
/* Visitor table wiring each relocatable instruction class to its
   displaced-stepping handler above.  */

static const struct aarch64_insn_visitor visitor =
{
  aarch64_displaced_step_b,
  aarch64_displaced_step_b_cond,
  aarch64_displaced_step_cb,
  aarch64_displaced_step_tb,
  aarch64_displaced_step_adr,
  aarch64_displaced_step_ldr_literal,
  aarch64_displaced_step_others,
};
/* Implement the "displaced_step_copy_insn" gdbarch method.

   Relocate the instruction at FROM so it can execute from the scratch
   pad at TO, writing the (possibly rewritten) instruction(s) there and
   returning a closure describing how to fix up the PC afterwards.
   Return NULL when displaced stepping cannot be used (undecodable
   instruction, or the start of an atomic load-exclusive sequence).  */

displaced_step_copy_insn_closure_up
aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				  CORE_ADDR from, CORE_ADDR to,
				  struct regcache *regs)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  struct aarch64_displaced_step_data dsd;
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return NULL;

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
    {
      /* We can't displaced step atomic sequences.  */
      return NULL;
    }

  std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
    (new aarch64_displaced_step_copy_insn_closure);
  dsd.base.insn_addr = from;
  dsd.new_addr = to;
  dsd.regs = regs;
  dsd.dsc = dsc.get ();
  dsd.insn_count = 0;
  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &dsd);
  gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);

  if (dsd.insn_count != 0)
    {
      int i;

      /* Instruction can be relocated to scratch pad.  Copy
	 relocated instruction(s) there.  */
      for (i = 0; i < dsd.insn_count; i++)
	{
	  displaced_debug_printf ("writing insn %.8x at %s",
				  dsd.insn_buf[i],
				  paddress (gdbarch, to + i * 4));

	  write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
					 (ULONGEST) dsd.insn_buf[i]);
	}
    }
  else
    {
      /* The visitor emitted nothing: signal failure to the caller.  */
      dsc = NULL;
    }

  /* This is a work around for a problem with g++ 4.8.  */
  return displaced_step_copy_insn_closure_up (dsc.release ());
}
/* Implement the "displaced_step_fixup" gdbarch method.

   After the relocated instruction at TO has executed, adjust the PC so
   execution resumes at the correct address relative to the original
   location FROM.  DSC_ is the closure built by
   aarch64_displaced_step_copy_insn; it records whether the copied insn
   was conditional (DSC->cond) and the PC adjustment to apply
   (DSC->pc_adjust).  */

void
aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
			      struct displaced_step_copy_insn_closure *dsc_,
			      CORE_ADDR from, CORE_ADDR to,
			      struct regcache *regs)
{
  aarch64_displaced_step_copy_insn_closure *dsc
    = (aarch64_displaced_step_copy_insn_closure *) dsc_;

  ULONGEST pc;

  regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);

  displaced_debug_printf ("PC after stepping: %s (was %s).",
			  paddress (gdbarch, pc), paddress (gdbarch, to));

  if (dsc->cond)
    {
      /* For a conditional insn the relocated copy is a 2-insn sequence,
	 so PC ends up at TO+8 when the condition held and TO+4 when it
	 did not.  Any other offset means something went badly wrong.  */
      displaced_debug_printf ("[Conditional] pc_adjust before: %d",
			      dsc->pc_adjust);

      if (pc - to == 8)
	{
	  /* Condition is true.  */
	}
      else if (pc - to == 4)
	{
	  /* Condition is false.  */
	  dsc->pc_adjust = 4;
	}
      else
	gdb_assert_not_reached ("Unexpected PC value after displaced stepping");

      displaced_debug_printf ("[Conditional] pc_adjust after: %d",
			      dsc->pc_adjust);
    }

  displaced_debug_printf ("%s PC by %d",
			  dsc->pc_adjust ? "adjusting" : "not adjusting",
			  dsc->pc_adjust);

  if (dsc->pc_adjust != 0)
    {
      /* Make sure the previous instruction was executed (that is, the PC
	 has changed).  If the PC didn't change, then discard the adjustment
	 offset.  Otherwise we may skip an instruction before its execution
	 took place.  */
      if ((pc - to) == 0)
	{
	  displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
	  dsc->pc_adjust = 0;
	}

      displaced_debug_printf ("fixup: set PC to %s:%d",
			      paddress (gdbarch, from), dsc->pc_adjust);

      regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
				      from + dsc->pc_adjust);
    }
}
/* Implement the "displaced_step_hw_singlestep" gdbarch method.

   Always true: the relocated copy in the scratch pad is executed with a
   hardware single-step.  */

bool
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
{
  return true;
}
/* Get the correct target description for the given FEATURES set
   (vector quotient, MTE, pauth, TLS, ...).

   If FEATURES.vq is zero then it is assumed SVE is not supported.
   (It is not possible to set VQ to zero on an SVE system).

   Descriptions are created lazily and cached in TDESC_AARCH64_MAP, so
   repeated calls with an identical feature set return the same
   target_desc object.  */

const target_desc *
aarch64_read_description (const aarch64_features &features)
{
  if (features.vq > AARCH64_MAX_SVE_VQ)
    error (_("VQ is %" PRIu64 ", maximum supported value is %d"), features.vq,
	   AARCH64_MAX_SVE_VQ);

  /* operator[] default-constructs a NULL entry on first lookup.  */
  struct target_desc *tdesc = tdesc_aarch64_map[features];

  if (tdesc == NULL)
    {
      tdesc = aarch64_create_target_description (features);
      tdesc_aarch64_map[features] = tdesc;
    }

  return tdesc;
}
3355 /* Return the VQ used when creating the target description TDESC. */
3357 static uint64_t
3358 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3360 const struct tdesc_feature *feature_sve;
3362 if (!tdesc_has_registers (tdesc))
3363 return 0;
3365 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3367 if (feature_sve == nullptr)
3368 return 0;
3370 uint64_t vl = tdesc_register_bitsize (feature_sve,
3371 aarch64_sve_register_names[0]) / 8;
3372 return sve_vq_from_vl (vl);
3375 /* Implement the "cannot_store_register" gdbarch method. */
3377 static int
3378 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3380 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
3382 if (!tdep->has_pauth ())
3383 return 0;
3385 /* Pointer authentication registers are read-only. */
3386 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3387 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
/* Implement the stack_frame_destroyed_p gdbarch method.

   Return non-zero when PC points at a RET instruction inside a known
   function, i.e. the frame has been torn down and only the return
   remains.  */

static int
aarch64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_start, func_end;
  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);

  aarch64_inst inst;
  if (aarch64_decode_insn (insn, &inst, 1, nullptr) != 0)
    return 0;

  return streq (inst.opcode->name, "ret");
}
3409 /* Initialize the current architecture based on INFO. If possible,
3410 re-use an architecture from ARCHES, which is a list of
3411 architectures already created during this debugging session.
3413 Called e.g. at program startup, when reading a core file, and when
3414 reading a binary file. */
3416 static struct gdbarch *
3417 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3419 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3420 const struct tdesc_feature *feature_pauth;
3421 bool valid_p = true;
3422 int i, num_regs = 0, num_pseudo_regs = 0;
3423 int first_pauth_regnum = -1, ra_sign_state_offset = -1;
3424 int first_mte_regnum = -1, tls_regnum = -1;
3426 /* Use the vector length passed via the target info. Here -1 is used for no
3427 SVE, and 0 is unset. If unset then use the vector length from the existing
3428 tdesc. */
3429 uint64_t vq = 0;
3430 if (info.id == (int *) -1)
3431 vq = 0;
3432 else if (info.id != 0)
3433 vq = (uint64_t) info.id;
3434 else
3435 vq = aarch64_get_tdesc_vq (info.target_desc);
3437 if (vq > AARCH64_MAX_SVE_VQ)
3438 internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3439 pulongest (vq), AARCH64_MAX_SVE_VQ);
3441 /* If there is already a candidate, use it. */
3442 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3443 best_arch != nullptr;
3444 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3446 aarch64_gdbarch_tdep *tdep
3447 = (aarch64_gdbarch_tdep *) gdbarch_tdep (best_arch->gdbarch);
3448 if (tdep && tdep->vq == vq)
3449 return best_arch->gdbarch;
3452 /* Ensure we always have a target descriptor, and that it is for the given VQ
3453 value. */
3454 const struct target_desc *tdesc = info.target_desc;
3455 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
3457 aarch64_features features;
3458 features.vq = vq;
3459 tdesc = aarch64_read_description (features);
3461 gdb_assert (tdesc);
3463 feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core");
3464 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3465 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3466 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3467 const struct tdesc_feature *feature_mte
3468 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
3469 const struct tdesc_feature *feature_tls
3470 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
3472 if (feature_core == nullptr)
3473 return nullptr;
3475 tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
3477 /* Validate the description provides the mandatory core R registers
3478 and allocate their numbers. */
3479 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3480 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
3481 AARCH64_X0_REGNUM + i,
3482 aarch64_r_register_names[i]);
3484 num_regs = AARCH64_X0_REGNUM + i;
3486 /* Add the V registers. */
3487 if (feature_fpu != nullptr)
3489 if (feature_sve != nullptr)
3490 error (_("Program contains both fpu and SVE features."));
3492 /* Validate the description provides the mandatory V registers
3493 and allocate their numbers. */
3494 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3495 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
3496 AARCH64_V0_REGNUM + i,
3497 aarch64_v_register_names[i]);
3499 num_regs = AARCH64_V0_REGNUM + i;
3502 /* Add the SVE registers. */
3503 if (feature_sve != nullptr)
3505 /* Validate the description provides the mandatory SVE registers
3506 and allocate their numbers. */
3507 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3508 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
3509 AARCH64_SVE_Z0_REGNUM + i,
3510 aarch64_sve_register_names[i]);
3512 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3513 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3516 if (feature_fpu != nullptr || feature_sve != nullptr)
3518 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3519 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3520 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3521 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3522 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3525 /* Add the TLS register. */
3526 if (feature_tls != nullptr)
3528 tls_regnum = num_regs;
3529 /* Validate the descriptor provides the mandatory TLS register
3530 and allocate its number. */
3531 valid_p = tdesc_numbered_register (feature_tls, tdesc_data.get (),
3532 tls_regnum, "tpidr");
3534 num_regs++;
3537 /* Add the pauth registers. */
3538 if (feature_pauth != NULL)
3540 first_pauth_regnum = num_regs;
3541 ra_sign_state_offset = num_pseudo_regs;
3542 /* Validate the descriptor provides the mandatory PAUTH registers and
3543 allocate their numbers. */
3544 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3545 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
3546 first_pauth_regnum + i,
3547 aarch64_pauth_register_names[i]);
3549 num_regs += i;
3550 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3553 /* Add the MTE registers. */
3554 if (feature_mte != NULL)
3556 first_mte_regnum = num_regs;
3557 /* Validate the descriptor provides the mandatory MTE registers and
3558 allocate their numbers. */
3559 for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
3560 valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
3561 first_mte_regnum + i,
3562 aarch64_mte_register_names[i]);
3564 num_regs += i;
3567 if (!valid_p)
3568 return nullptr;
3570 /* AArch64 code is always little-endian. */
3571 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3573 aarch64_gdbarch_tdep *tdep = new aarch64_gdbarch_tdep;
3574 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3576 /* This should be low enough for everything. */
3577 tdep->lowest_pc = 0x20;
3578 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3579 tdep->jb_elt_size = 8;
3580 tdep->vq = vq;
3581 tdep->pauth_reg_base = first_pauth_regnum;
3582 tdep->ra_sign_state_regnum = (feature_pauth == NULL) ? -1
3583 : ra_sign_state_offset + num_regs;
3584 tdep->mte_reg_base = first_mte_regnum;
3585 tdep->tls_regnum = tls_regnum;
3587 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3588 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3590 /* Advance PC across function entry code. */
3591 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3593 /* The stack grows downward. */
3594 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3596 /* Breakpoint manipulation. */
3597 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3598 aarch64_breakpoint::kind_from_pc);
3599 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3600 aarch64_breakpoint::bp_from_kind);
3601 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3602 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3604 /* Information about registers, etc. */
3605 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3606 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3607 set_gdbarch_num_regs (gdbarch, num_regs);
3609 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3610 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3611 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3612 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3613 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3614 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3615 aarch64_pseudo_register_reggroup_p);
3616 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3618 /* ABI */
3619 set_gdbarch_short_bit (gdbarch, 16);
3620 set_gdbarch_int_bit (gdbarch, 32);
3621 set_gdbarch_float_bit (gdbarch, 32);
3622 set_gdbarch_double_bit (gdbarch, 64);
3623 set_gdbarch_long_double_bit (gdbarch, 128);
3624 set_gdbarch_long_bit (gdbarch, 64);
3625 set_gdbarch_long_long_bit (gdbarch, 64);
3626 set_gdbarch_ptr_bit (gdbarch, 64);
3627 set_gdbarch_char_signed (gdbarch, 0);
3628 set_gdbarch_wchar_signed (gdbarch, 0);
3629 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3630 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3631 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_quad);
3632 set_gdbarch_type_align (gdbarch, aarch64_type_align);
3634 /* Detect whether PC is at a point where the stack has been destroyed. */
3635 set_gdbarch_stack_frame_destroyed_p (gdbarch, aarch64_stack_frame_destroyed_p);
3637 /* Internal <-> external register number maps. */
3638 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3640 /* Returning results. */
3641 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3643 /* Disassembly. */
3644 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3646 /* Virtual tables. */
3647 set_gdbarch_vbit_in_delta (gdbarch, 1);
3649 /* Hook in the ABI-specific overrides, if they have been registered. */
3650 info.target_desc = tdesc;
3651 info.tdesc_data = tdesc_data.get ();
3652 gdbarch_init_osabi (info, gdbarch);
3654 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3655 /* Register DWARF CFA vendor handler. */
3656 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3657 aarch64_execute_dwarf_cfa_vendor_op);
3659 /* Permanent/Program breakpoint handling. */
3660 set_gdbarch_program_breakpoint_here_p (gdbarch,
3661 aarch64_program_breakpoint_here_p);
3663 /* Add some default predicates. */
3664 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3665 dwarf2_append_unwinders (gdbarch);
3666 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3668 frame_base_set_default (gdbarch, &aarch64_normal_base);
3670 /* Now we have tuned the configuration, set a few final things,
3671 based on what the OS ABI has told us. */
3673 if (tdep->jb_pc >= 0)
3674 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3676 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3678 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
3680 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
3682 /* Add standard register aliases. */
3683 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3684 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3685 value_of_aarch64_user_reg,
3686 &aarch64_register_aliases[i].regnum);
3688 register_aarch64_ravenscar_ops (gdbarch);
3690 return gdbarch;
3693 static void
3694 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3696 aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
3698 if (tdep == NULL)
3699 return;
3701 gdb_printf (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3702 paddress (gdbarch, tdep->lowest_pc));
3705 #if GDB_SELF_TEST
3706 namespace selftests
3708 static void aarch64_process_record_test (void);
3710 #endif
/* Module initializer: register the AArch64 gdbarch, the "set/show debug
   aarch64" maintenance command, and (in self-test builds) the unit
   tests.  */
void _initialize_aarch64_tdep ();
void
_initialize_aarch64_tdep ()
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			    NULL,
			    show_aarch64_debug,
			    &setdebuglist, &showdebuglist);

#if GDB_SELF_TEST
  selftests::register_test ("aarch64-analyze-prologue",
			    selftests::aarch64_analyze_prologue_test);
  selftests::register_test ("aarch64-process-record",
			    selftests::aarch64_process_record_test);
#endif
}
/* AArch64 process record-replay related structures, defines etc.  */

/* Allocate REGS as an array of LENGTH uint32_t register numbers and
   copy RECORD_BUF into it.  No-op when LENGTH is 0.  The caller owns
   (and must free) the XNEWVEC'd storage.
   NOTE(review): LENGTH is evaluated twice; all current callers pass a
   simple variable, so this is safe, but don't pass an expression with
   side effects.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
	do  \
	  { \
	    unsigned int reg_len = LENGTH; \
	    if (reg_len) \
	      { \
		REGS = XNEWVEC (uint32_t, reg_len); \
		memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
	      } \
	  } \
	while (0)

/* Allocate MEMS as an array of LENGTH aarch64_mem_r records and copy
   RECORD_BUF into it.  Same ownership and double-evaluation caveats as
   REG_ALLOC above.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
	do \
	  { \
	    unsigned int mem_len = LENGTH; \
	    if (mem_len) \
	      { \
		MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
		memcpy(MEMS, &RECORD_BUF[0], \
		       sizeof(struct aarch64_mem_r) * LENGTH); \
	      } \
	  } \
	  while (0)
/* AArch64 record/replay structures and enumerations.  */

/* One recorded memory access: LEN bytes at ADDR will be (re)written by
   the instruction being recorded.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};

/* Result codes returned by the aarch64_record_* decode handlers.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,
  AARCH64_RECORD_UNSUPPORTED,
  AARCH64_RECORD_UNKNOWN
};

/* State shared by the record handlers while decoding one instruction:
   the raw insn, and the register numbers / memory ranges it will
   modify.  AARCH64_REGS and AARCH64_MEMS are heap arrays filled in via
   REG_ALLOC / MEM_ALLOC; the caller frees them.  */
typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
} insn_decode_record;
/* Record handler for data processing - register instructions.

   Records the destination register (and CPSR when the instruction sets
   flags) into AARCH64_INSN_R.  Returns AARCH64_RECORD_SUCCESS or
   AARCH64_RECORD_UNKNOWN for encodings this handler does not decode.  */

static unsigned int
aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract.  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source).  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      /* Bit 29 set means the S (set flags) variant.  */
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* Conditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
/* Record handler for data processing - immediate instructions.

   Records the destination register (and CPSR for flag-setting forms).
   Returns AARCH64_RECORD_SUCCESS or AARCH64_RECORD_UNKNOWN.  */

static unsigned int
aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);

  if (insn_bits24_27 == 0x00                     /* PC rel addressing.  */
      || insn_bits24_27 == 0x03                  /* Bitfield and Extract.  */
      || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate).  */
    {
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
    }
  else if (insn_bits24_27 == 0x01)
    {
      /* Add/Subtract (immediate).  */
      setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else if (insn_bits24_27 == 0x02 && !insn_bit23)
    {
      /* Logical (immediate).  */
      setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    return AARCH64_RECORD_UNKNOWN;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
/* Record handler for branch, exception generation and system instructions.

   SVC is delegated to the OS-specific tdep->aarch64_syscall_record hook;
   other branches record PC (and LR for the linking variants).  Returns
   one of enum aarch64_record_result.  */

static unsigned int
aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
{
  aarch64_gdbarch_tdep *tdep
    = (aarch64_gdbarch_tdep *) gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions. */
      if (insn_bits24_27 == 0x04)
	{
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      /* The AArch64 Linux ABI passes the syscall number in x8
		 (raw register 8).  */
	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions. */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions.  */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
      /* Unconditional branch (register).  */
      else if((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  /* BLR also writes the link register.  */
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate).  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* BL (bit 31 set) also writes the link register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate).  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
/* Record handler for advanced SIMD load and store instructions.

   For loads, records the destination V registers; for stores, records
   the affected memory ranges (element size and address pairs).  When
   writeback is selected (bit 23), the base register Rn is recorded too.
   Returns one of enum aarch64_record_result.  */

static unsigned int
aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  uint64_t record_buf_mem[24];
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure.  */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;
      scale = opcode_bits >> 2;
      /* Number of structure elements: bit 1 of opcode and the R bit
	 (bit 21), plus one.  */
      selem = ((opcode_bits & 0x02) |
	       bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      switch (scale)
	{
	case 1:
	  if (size_bits & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  break;
	case 2:
	  if ((size_bits >> 1) & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  if (size_bits & 0x01)
	    {
	      if (!((opcode_bits >> 1) & 0x01))
		scale = 3;
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  break;
	case 3:
	  /* LD*R (load and replicate) forms.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
	    {
	      scale = size_bits;
	      replicate = 1;
	      break;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	default:
	  break;
	}
      esize = 8 << scale;
      if (replicate)
	for (sindex = 0; sindex < selem; sindex++)
	  {
	    record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	    reg_rt = (reg_rt + 1) % 32;
	  }
      else
	{
	  for (sindex = 0; sindex < selem; sindex++)
	    {
	      /* Bit 22 distinguishes load (record register) from store
		 (record memory).  */
	      if (bit (aarch64_insn_r->aarch64_insn, 22))
		record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	      else
		{
		  record_buf_mem[mem_index++] = esize / 8;
		  record_buf_mem[mem_index++] = address + addr_offset;
		}
	      addr_offset = addr_offset + (esize / 8);
	      reg_rt = (reg_rt + 1) % 32;
	    }
	}
    }
  /* Load/store multiple structure.  */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      if (bit (aarch64_insn_r->aarch64_insn, 30))
	elements = 128 / esize;
      else
	elements = 64 / esize;

      switch (opcode_bits)
	{
	/*LD/ST4 (4 Registers).  */
	case 0:
	  rpt = 1;
	  selem = 4;
	  break;
	/*LD/ST1 (4 Registers).  */
	case 2:
	  rpt = 4;
	  selem = 1;
	  break;
	/*LD/ST3 (3 Registers).  */
	case 4:
	  rpt = 1;
	  selem = 3;
	  break;
	/*LD/ST1 (3 Registers).  */
	case 6:
	  rpt = 3;
	  selem = 1;
	  break;
	/*LD/ST1 (1 Register).  */
	case 7:
	  rpt = 1;
	  selem = 1;
	  break;
	/*LD/ST2 (2 Registers).  */
	case 8:
	  rpt = 1;
	  selem = 2;
	  break;
	/*LD/ST1 (2 Registers).  */
	case 10:
	  rpt = 2;
	  selem = 1;
	  break;
	default:
	  return AARCH64_RECORD_UNSUPPORTED;
	  break;
	}
      for (rindex = 0; rindex < rpt; rindex++)
	for (eindex = 0; eindex < elements; eindex++)
	  {
	    uint8_t reg_tt, sindex;
	    reg_tt = (reg_rt + rindex) % 32;
	    for (sindex = 0; sindex < selem; sindex++)
	      {
		if (bit (aarch64_insn_r->aarch64_insn, 22))
		  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
		else
		  {
		    record_buf_mem[mem_index++] = esize / 8;
		    record_buf_mem[mem_index++] = address + addr_offset;
		  }
		addr_offset = addr_offset + (esize / 8);
		reg_tt = (reg_tt + 1) % 32;
	      }
	  }
    }

  /* Writeback: the base register is also modified.  */
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
4159 /* Record handler for load and store instructions. */
4161 static unsigned int
4162 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
4164 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
4165 uint8_t insn_bit23, insn_bit21;
4166 uint8_t opc, size_bits, ld_flag, vector_flag;
4167 uint32_t reg_rn, reg_rt, reg_rt2;
4168 uint64_t datasize, offset;
4169 uint32_t record_buf[8];
4170 uint64_t record_buf_mem[8];
4171 CORE_ADDR address;
4173 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4174 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4175 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
4176 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4177 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
4178 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
4179 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
4180 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4181 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
4182 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
4183 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
4185 /* Load/store exclusive. */
4186 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
4188 if (record_debug)
4189 debug_printf ("Process record: load/store exclusive\n");
4191 if (ld_flag)
4193 record_buf[0] = reg_rt;
4194 aarch64_insn_r->reg_rec_count = 1;
4195 if (insn_bit21)
4197 record_buf[1] = reg_rt2;
4198 aarch64_insn_r->reg_rec_count = 2;
4201 else
4203 if (insn_bit21)
4204 datasize = (8 << size_bits) * 2;
4205 else
4206 datasize = (8 << size_bits);
4207 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4208 &address);
4209 record_buf_mem[0] = datasize / 8;
4210 record_buf_mem[1] = address;
4211 aarch64_insn_r->mem_rec_count = 1;
4212 if (!insn_bit23)
4214 /* Save register rs. */
4215 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
4216 aarch64_insn_r->reg_rec_count = 1;
4220 /* Load register (literal) instructions decoding. */
4221 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
4223 if (record_debug)
4224 debug_printf ("Process record: load register (literal)\n");
4225 if (vector_flag)
4226 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4227 else
4228 record_buf[0] = reg_rt;
4229 aarch64_insn_r->reg_rec_count = 1;
4231 /* All types of load/store pair instructions decoding. */
4232 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
4234 if (record_debug)
4235 debug_printf ("Process record: load/store pair\n");
4237 if (ld_flag)
4239 if (vector_flag)
4241 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4242 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
4244 else
4246 record_buf[0] = reg_rt;
4247 record_buf[1] = reg_rt2;
4249 aarch64_insn_r->reg_rec_count = 2;
4251 else
4253 uint16_t imm7_off;
4254 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
4255 if (!vector_flag)
4256 size_bits = size_bits >> 1;
4257 datasize = 8 << (2 + size_bits);
4258 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
4259 offset = offset << (2 + size_bits);
4260 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4261 &address);
4262 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
4264 if (imm7_off & 0x40)
4265 address = address - offset;
4266 else
4267 address = address + offset;
4270 record_buf_mem[0] = datasize / 8;
4271 record_buf_mem[1] = address;
4272 record_buf_mem[2] = datasize / 8;
4273 record_buf_mem[3] = address + (datasize / 8);
4274 aarch64_insn_r->mem_rec_count = 2;
4276 if (bit (aarch64_insn_r->aarch64_insn, 23))
4277 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4279 /* Load/store register (unsigned immediate) instructions. */
4280 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
4282 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4283 if (!(opc >> 1))
4285 if (opc & 0x01)
4286 ld_flag = 0x01;
4287 else
4288 ld_flag = 0x0;
4290 else
4292 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
4294 /* PRFM (immediate) */
4295 return AARCH64_RECORD_SUCCESS;
4297 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
4299 /* LDRSW (immediate) */
4300 ld_flag = 0x1;
4302 else
4304 if (opc & 0x01)
4305 ld_flag = 0x01;
4306 else
4307 ld_flag = 0x0;
4311 if (record_debug)
4313 debug_printf ("Process record: load/store (unsigned immediate):"
4314 " size %x V %d opc %x\n", size_bits, vector_flag,
4315 opc);
4318 if (!ld_flag)
4320 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4321 datasize = 8 << size_bits;
4322 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4323 &address);
4324 offset = offset << size_bits;
4325 address = address + offset;
4327 record_buf_mem[0] = datasize >> 3;
4328 record_buf_mem[1] = address;
4329 aarch64_insn_r->mem_rec_count = 1;
4331 else
4333 if (vector_flag)
4334 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4335 else
4336 record_buf[0] = reg_rt;
4337 aarch64_insn_r->reg_rec_count = 1;
4340 /* Load/store register (register offset) instructions. */
4341 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4342 && insn_bits10_11 == 0x02 && insn_bit21)
4344 if (record_debug)
4345 debug_printf ("Process record: load/store (register offset)\n");
4346 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4347 if (!(opc >> 1))
4348 if (opc & 0x01)
4349 ld_flag = 0x01;
4350 else
4351 ld_flag = 0x0;
4352 else
4353 if (size_bits != 0x03)
4354 ld_flag = 0x01;
4355 else
4356 return AARCH64_RECORD_UNKNOWN;
4358 if (!ld_flag)
4360 ULONGEST reg_rm_val;
4362 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4363 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4364 if (bit (aarch64_insn_r->aarch64_insn, 12))
4365 offset = reg_rm_val << size_bits;
4366 else
4367 offset = reg_rm_val;
4368 datasize = 8 << size_bits;
4369 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4370 &address);
4371 address = address + offset;
4372 record_buf_mem[0] = datasize >> 3;
4373 record_buf_mem[1] = address;
4374 aarch64_insn_r->mem_rec_count = 1;
4376 else
4378 if (vector_flag)
4379 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4380 else
4381 record_buf[0] = reg_rt;
4382 aarch64_insn_r->reg_rec_count = 1;
4385 /* Load/store register (immediate and unprivileged) instructions. */
4386 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4387 && !insn_bit21)
4389 if (record_debug)
4391 debug_printf ("Process record: load/store "
4392 "(immediate and unprivileged)\n");
4394 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4395 if (!(opc >> 1))
4396 if (opc & 0x01)
4397 ld_flag = 0x01;
4398 else
4399 ld_flag = 0x0;
4400 else
4401 if (size_bits != 0x03)
4402 ld_flag = 0x01;
4403 else
4404 return AARCH64_RECORD_UNKNOWN;
4406 if (!ld_flag)
4408 uint16_t imm9_off;
4409 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4410 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4411 datasize = 8 << size_bits;
4412 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4413 &address);
4414 if (insn_bits10_11 != 0x01)
4416 if (imm9_off & 0x0100)
4417 address = address - offset;
4418 else
4419 address = address + offset;
4421 record_buf_mem[0] = datasize >> 3;
4422 record_buf_mem[1] = address;
4423 aarch64_insn_r->mem_rec_count = 1;
4425 else
4427 if (vector_flag)
4428 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4429 else
4430 record_buf[0] = reg_rt;
4431 aarch64_insn_r->reg_rec_count = 1;
4433 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4434 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4436 /* Advanced SIMD load/store instructions. */
4437 else
4438 return aarch64_record_asimd_load_store (aarch64_insn_r);
4440 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4441 record_buf_mem);
4442 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4443 record_buf);
4444 return AARCH64_RECORD_SUCCESS;
4447 /* Record handler for data processing SIMD and floating point instructions. */
4449 static unsigned int
4450 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4452 uint8_t insn_bit21, opcode, rmode, reg_rd;
4453 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4454 uint8_t insn_bits11_14;
4455 uint32_t record_buf[2];
4457 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4458 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4459 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4460 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4461 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4462 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4463 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4464 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4465 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4467 if (record_debug)
4468 debug_printf ("Process record: data processing SIMD/FP: ");
4470 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4472 /* Floating point - fixed point conversion instructions. */
4473 if (!insn_bit21)
4475 if (record_debug)
4476 debug_printf ("FP - fixed point conversion");
4478 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4479 record_buf[0] = reg_rd;
4480 else
4481 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4483 /* Floating point - conditional compare instructions. */
4484 else if (insn_bits10_11 == 0x01)
4486 if (record_debug)
4487 debug_printf ("FP - conditional compare");
4489 record_buf[0] = AARCH64_CPSR_REGNUM;
4491 /* Floating point - data processing (2-source) and
4492 conditional select instructions. */
4493 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4495 if (record_debug)
4496 debug_printf ("FP - DP (2-source)");
4498 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4500 else if (insn_bits10_11 == 0x00)
4502 /* Floating point - immediate instructions. */
4503 if ((insn_bits12_15 & 0x01) == 0x01
4504 || (insn_bits12_15 & 0x07) == 0x04)
4506 if (record_debug)
4507 debug_printf ("FP - immediate");
4508 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4510 /* Floating point - compare instructions. */
4511 else if ((insn_bits12_15 & 0x03) == 0x02)
4513 if (record_debug)
4514 debug_printf ("FP - immediate");
4515 record_buf[0] = AARCH64_CPSR_REGNUM;
4517 /* Floating point - integer conversions instructions. */
4518 else if (insn_bits12_15 == 0x00)
4520 /* Convert float to integer instruction. */
4521 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4523 if (record_debug)
4524 debug_printf ("float to int conversion");
4526 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4528 /* Convert integer to float instruction. */
4529 else if ((opcode >> 1) == 0x01 && !rmode)
4531 if (record_debug)
4532 debug_printf ("int to float conversion");
4534 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4536 /* Move float to integer instruction. */
4537 else if ((opcode >> 1) == 0x03)
4539 if (record_debug)
4540 debug_printf ("move float to int");
4542 if (!(opcode & 0x01))
4543 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4544 else
4545 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4547 else
4548 return AARCH64_RECORD_UNKNOWN;
4550 else
4551 return AARCH64_RECORD_UNKNOWN;
4553 else
4554 return AARCH64_RECORD_UNKNOWN;
4556 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4558 if (record_debug)
4559 debug_printf ("SIMD copy");
4561 /* Advanced SIMD copy instructions. */
4562 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4563 && !bit (aarch64_insn_r->aarch64_insn, 15)
4564 && bit (aarch64_insn_r->aarch64_insn, 10))
4566 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4567 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4568 else
4569 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4571 else
4572 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4574 /* All remaining floating point or advanced SIMD instructions. */
4575 else
4577 if (record_debug)
4578 debug_printf ("all remain");
4580 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4583 if (record_debug)
4584 debug_printf ("\n");
4586 /* Record the V/X register. */
4587 aarch64_insn_r->reg_rec_count++;
4589 /* Some of these instructions may set bits in the FPSR, so record it
4590 too. */
4591 record_buf[1] = AARCH64_FPSR_REGNUM;
4592 aarch64_insn_r->reg_rec_count++;
4594 gdb_assert (aarch64_insn_r->reg_rec_count == 2);
4595 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4596 record_buf);
4597 return AARCH64_RECORD_SUCCESS;
4600 /* Decodes insns type and invokes its record handler. */
4602 static unsigned int
4603 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4605 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4607 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4608 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4609 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4610 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4612 /* Data processing - immediate instructions. */
4613 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4614 return aarch64_record_data_proc_imm (aarch64_insn_r);
4616 /* Branch, exception generation and system instructions. */
4617 if (ins_bit26 && !ins_bit27 && ins_bit28)
4618 return aarch64_record_branch_except_sys (aarch64_insn_r);
4620 /* Load and store instructions. */
4621 if (!ins_bit25 && ins_bit27)
4622 return aarch64_record_load_store (aarch64_insn_r);
4624 /* Data processing - register instructions. */
4625 if (ins_bit25 && !ins_bit26 && ins_bit27)
4626 return aarch64_record_data_proc_reg (aarch64_insn_r);
4628 /* Data processing - SIMD and floating point instructions. */
4629 if (ins_bit25 && ins_bit26 && ins_bit27)
4630 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4632 return AARCH64_RECORD_UNSUPPORTED;
4635 /* Cleans up local record registers and memory allocations. */
4637 static void
4638 deallocate_reg_mem (insn_decode_record *record)
4640 xfree (record->aarch64_regs);
4641 xfree (record->aarch64_mems);
#if GDB_SELF_TEST
namespace selftests {

/* Verify that the record-decoder classifies a PRFM (prefetch) instruction
   as a success that records nothing: prefetches change neither registers
   nor memory, so both record counts must stay zero.  */

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  insn_decode_record aarch64_record;
  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;

  /* 20 00 80 f9	prfm	pldl1keep, [x1]  */
  aarch64_record.aarch64_insn = 0xf9800020;
  uint32_t result = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (result == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 0);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);

  deallocate_reg_mem (&aarch64_record);
}

} // namespace selftests
#endif /* GDB_SELF_TEST */
4678 /* Parse the current instruction and record the values of the registers and
4679 memory that will be changed in current instruction to record_arch_list
4680 return -1 if something is wrong. */
4683 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4684 CORE_ADDR insn_addr)
4686 uint32_t rec_no = 0;
4687 uint8_t insn_size = 4;
4688 uint32_t ret = 0;
4689 gdb_byte buf[insn_size];
4690 insn_decode_record aarch64_record;
4692 memset (&buf[0], 0, insn_size);
4693 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4694 target_read_memory (insn_addr, &buf[0], insn_size);
4695 aarch64_record.aarch64_insn
4696 = (uint32_t) extract_unsigned_integer (&buf[0],
4697 insn_size,
4698 gdbarch_byte_order (gdbarch));
4699 aarch64_record.regcache = regcache;
4700 aarch64_record.this_addr = insn_addr;
4701 aarch64_record.gdbarch = gdbarch;
4703 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4704 if (ret == AARCH64_RECORD_UNSUPPORTED)
4706 gdb_printf (gdb_stderr,
4707 _("Process record does not support instruction "
4708 "0x%0x at address %s.\n"),
4709 aarch64_record.aarch64_insn,
4710 paddress (gdbarch, insn_addr));
4711 ret = -1;
4714 if (0 == ret)
4716 /* Record registers. */
4717 record_full_arch_list_add_reg (aarch64_record.regcache,
4718 AARCH64_PC_REGNUM);
4719 /* Always record register CPSR. */
4720 record_full_arch_list_add_reg (aarch64_record.regcache,
4721 AARCH64_CPSR_REGNUM);
4722 if (aarch64_record.aarch64_regs)
4723 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4724 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4725 aarch64_record.aarch64_regs[rec_no]))
4726 ret = -1;
4728 /* Record memories. */
4729 if (aarch64_record.aarch64_mems)
4730 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4731 if (record_full_arch_list_add_mem
4732 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4733 aarch64_record.aarch64_mems[rec_no].len))
4734 ret = -1;
4736 if (record_full_arch_list_add_end ())
4737 ret = -1;
4740 deallocate_reg_mem (&aarch64_record);
4741 return ret;