/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "arch/aarch64-mte-linux.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-mte-linux-ptrace.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

#ifdef HAVE_GETAUXVAL
#include <sys/auxv.h>
#endif
/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  bool supports_memory_tagging () override;

  bool fetch_memtags (CORE_ADDR address, size_t len,
		      gdb::byte_vector &tags, int type) override;

  bool store_memtags (CORE_ADDR address, size_t len,
		      const gdb::byte_vector &tags, int type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;
bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

void
aarch64_target::low_prepare_to_resume (lwp_info *lwp)
{
  aarch64_linux_prepare_to_resume (lwp);
}
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};
/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  /* We may not have a current thread at this point, so go straight to
     the process's target description.  */
  return register_size (current_process ()->tdesc, 0) == 8;
}
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

/* Fill BUF with the MTE registers from the regcache.  */

static void
aarch64_fill_mteregset (struct regcache *regcache, void *buf)
{
  uint64_t *mte_regset = (uint64_t *) buf;
  int mte_base = find_regno (regcache->tdesc, "tag_ctl");

  collect_register (regcache, mte_base, mte_regset);
}

/* Store the MTE registers to regcache.  */

static void
aarch64_store_mteregset (struct regcache *regcache, const void *buf)
{
  uint64_t *mte_regset = (uint64_t *) buf;
  int mte_base = find_regno (regcache->tdesc, "tag_ctl");

  /* Tag Control register.  */
  supply_register (regcache, mte_base, mte_regset);
}

/* Fill BUF with the TLS register from the regcache.  */

static void
aarch64_fill_tlsregset (struct regcache *regcache, void *buf)
{
  int tls_regnum = find_regno (regcache->tdesc, "tpidr");

  collect_register (regcache, tls_regnum, buf);
}

/* Store the TLS register to regcache.  */

static void
aarch64_store_tlsregset (struct regcache *regcache, const void *buf)
{
  int tls_regnum = find_regno (regcache->tdesc, "tpidr");

  supply_register (regcache, tls_regnum, buf);
}
bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}
#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
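
/* The bytes above are the little-endian encoding of the word 0xd4200000,
   i.e. BRK #0; instruction words are always stored little-endian on
   AArch64, regardless of data endianness (see append_insns below).  */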
/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}
static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */,
					 current_lwp_ptid (), state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */,
				       current_lwp_ptid (), state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}
/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret = aarch64_handle_watchpoint (targ_type, addr, len,
				     0 /* is_insert */,
				     current_lwp_ptid (), state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */,
				       current_lwp_ptid (), state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}
/* Return the address with only its significant bits.  This is used to
   ignore the top byte (TBI).  */

static CORE_ADDR
address_significant (CORE_ADDR addr)
{
  /* Clear the insignificant bits of the target address and sign extend
     the resulting address.  */
  int addr_bit = 56;
  CORE_ADDR sign = (CORE_ADDR) 1 << (addr_bit - 1);

  addr &= ((CORE_ADDR) 1 << addr_bit) - 1;
  addr = (addr ^ sign) - sign;

  return addr;
}
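
/* For instance, for a kernel pointer carrying a TBI tag of 0x5a in the
   top byte:

     address_significant (0x5affffffc0080f24) == 0xffffffffc0080f24

   The tag byte is discarded and bit 55 is sign-extended through the top
   byte, keeping kernel-space addresses canonical.  */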
/* Implementation of linux target ops method "low_stopped_data_address".  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Make sure to ignore the top byte, otherwise we may not recognize a
     hardware watchpoint hit.  The stopped data addresses coming from the
     kernel can potentially be tagged addresses.  */
  const CORE_ADDR addr_trap
    = address_significant ((CORE_ADDR) siginfo.si_addr);

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
	                           |---- range watched ----|
	           |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}
/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}
/* Implementation of linux target ops method "low_siginfo_fixup".  */

bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
				   int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return true;
    }

  return false;
}
/* Implementation of linux target ops method "low_new_process".  */

arch_process_info *
aarch64_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux target ops method "low_delete_process".  */

void
aarch64_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
aarch64_target::low_new_thread (lwp_info *lwp)
{
  aarch64_linux_new_thread (lwp);
}

void
aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
{
  aarch64_linux_delete_thread (arch_lwp);
}
/* Implementation of linux target ops method "low_new_fork".  */

void
aarch64_target::low_new_fork (process_info *parent,
			      process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit hardware debug registers from the parent on
     fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked-off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will end up zeroed
     before detaching the forked-off process, thus making this
     compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}
/* Array containing all the possible register sets for AArch64/Linux.  During
   architecture setup, these will be checked against the HWCAP/HWCAP2 bits for
   validity and enabled/disabled accordingly.

   Their sizes are set to 0 here, but they will be adjusted later depending
   on whether each register set is available or not.  */
static struct regset_info aarch64_regsets[] =
{
  /* GPR registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    0, GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  /* Floating Point (FPU) registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    0, FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset },
  /* Scalable Vector Extension (SVE) registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    0, EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache },
  /* PAC registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    0, OPTIONAL_REGS,
    nullptr, aarch64_store_pauthregset },
  /* Tagged address control / MTE registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_TAGGED_ADDR_CTRL,
    0, OPTIONAL_REGS,
    aarch64_fill_mteregset, aarch64_store_mteregset },
  /* TLS register.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_TLS,
    0, OPTIONAL_REGS,
    aarch64_fill_tlsregset, aarch64_store_tlsregset },
  NULL_REGSET
};
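
/* NULL_REGSET terminates the array with a negative size, which is what
   the "regset->size >= 0" walk in aarch64_adjust_register_sets below
   stops on.  */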
static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    nullptr, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    nullptr, /* regset_bitmap */
    nullptr, /* usrregs */
    &aarch64_regsets_info,
  };
/* Given FEATURES, adjust the available register sets by setting their
   sizes.  A size of 0 means the register set is disabled and won't be
   used.  */

static void
aarch64_adjust_register_sets (const struct aarch64_features &features)
{
  struct regset_info *regset;

  for (regset = aarch64_regsets; regset->size >= 0; regset++)
    {
      switch (regset->nt_type)
	{
	case NT_PRSTATUS:
	  /* General purpose registers are always present.  */
	  regset->size = sizeof (struct user_pt_regs);
	  break;
	case NT_FPREGSET:
	  /* This is unavailable when SVE is present.  */
	  if (features.vq == 0)
	    regset->size = sizeof (struct user_fpsimd_state);
	  break;
	case NT_ARM_SVE:
	  if (features.vq > 0)
	    regset->size = SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE);
	  break;
	case NT_ARM_PAC_MASK:
	  if (features.pauth)
	    regset->size = AARCH64_PAUTH_REGS_SIZE;
	  break;
	case NT_ARM_TAGGED_ADDR_CTRL:
	  if (features.mte)
	    regset->size = AARCH64_LINUX_SIZEOF_MTE;
	  break;
	case NT_ARM_TLS:
	  if (features.tls)
	    regset->size = AARCH64_TLS_REGS_SIZE;
	  break;
	default:
	  gdb_assert_not_reached ("Unknown register set found.");
	}
    }
}
/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      struct aarch64_features features;

      features.vq = aarch64_sve_get_vq (tid);
      /* A-profile PAC is 64-bit only.  */
      features.pauth = linux_get_hwcap (8) & AARCH64_HWCAP_PACA;
      /* A-profile MTE is 64-bit only.  */
      features.mte = linux_get_hwcap2 (8) & HWCAP2_MTE;
      features.tls = true;

      current_process ()->tdesc = aarch64_linux_read_description (features);

      /* Adjust the register sets we should use for this particular set of
	 features.  */
      aarch64_adjust_register_sets (features);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}
/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  /* AArch64 64-bit registers.  */
  return &regs_info_aarch64;
}

/* Implementation of target ops method "supports_tracepoints".  */

bool
aarch64_target::supports_tracepoints ()
{
  if (current_thread == NULL)
    return true;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}
/* Implementation of linux target ops method "low_get_thread_area".  */

int
aarch64_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

bool
aarch64_target::low_supports_catch_syscall ()
{
  return true;
}
/* Implementation of linux target ops method "low_get_syscall_trapinfo".  */

void
aarch64_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};
/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}
/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
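
/* Each value above packs the op0:op1:CRn:CRm:op2 name of the system
   register into the 15-bit field that emit_mrs/emit_msr (below) place at
   bits 5..19 of the instruction.  For example, TPIDR_EL0 evaluates to
   0x5e82; assuming the usual MRS base opcode 0xd5300000 from
   arch/aarch64-insn.h, emit_mrs (p, x0, TPIDR_EL0) would produce the
   word 0xd53bd040, i.e. "MRS x0, TPIDR_EL0".  */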
/* Write a BLR instruction into *BUF.

     BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

     RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
/* Write a STP instruction into *BUF.

     STP rt, rt2, [rn, #offset]
     STP rt, rt2, [rn, #index]!
     STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}
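
/* For example, pushing a frame record with a pre-indexed store pair:

     p += emit_stp (p, fp, lr, sp, preindex_memory_operand (-16));

   assembles to "STP x29, x30, [sp, #-16]!", storing both registers and
   writing the decremented address back to SP in a single instruction.  */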
/* Write a LDP instruction into *BUF.

     LDP rt, rt2, [rn, #offset]
     LDP rt, rt2, [rn, #index]!
     LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}
/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

     LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   the -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

     STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   the -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
/* Write a LDRH instruction into *BUF.

     LDRH wt, [xn, #offset]
     LDRH wt, [xn, #index]!
     LDRH wt, [xn], #index

   RT is the destination register.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   the 0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

     LDRB wt, [xn, #offset]
     LDRB wt, [xn, #index]!
     LDRB wt, [xn], #index

   RT is the destination register.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   the 0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}
/* Write a STR instruction into *BUF.

     STR rt, [rn, #offset]
     STR rt, [rn, #index]!
     STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   the 0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}
/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
/* Write a LDAXR instruction into *BUF.

     LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}
/* Write a STXR instruction into *BUF.

     STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the register to store.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

     STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}
/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}
/* Write an ADD instruction into *BUF.

     ADD rd, rn, #imm
     ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}
/* Write a SUB instruction into *BUF.

     SUB rd, rn, #imm
     SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the value to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}
/* Write a MOV instruction into *BUF.

     MOV rd, #imm
     MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}
/* Write a MOVK instruction into *BUF.

     MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
			    | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}
/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
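
/* For example, for ADDR == 0x0000007fb7d01234 the sequence stops after
   three instructions, since bits 48..63 are all zero:

     MOV  xd, #0x1234
     MOVK xd, #0xb7d0, lsl #16
     MOVK xd, #0x7f, lsl #32  */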
/* Write a SUBS instruction into *BUF.

     SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

     CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}
/* Write an AND instruction into *BUF.

     AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

     ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

     ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

     EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

     MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

     LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

     LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

     ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

     MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}
/* Write an MRS instruction into *BUF.  The register size is 64-bit.

     MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write an MSR instruction into *BUF.  The register size is 64-bit.

     MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}
/* Write a SBFM instruction into *BUF.

     SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

     SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

     UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

     UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a CSINC instruction into *BUF.

     CSINC rd, rn, rm, cond

   This instruction conditionally selects a value: RN is placed in RD if
   the condition is true, and RM + 1 otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a CSET instruction into *BUF.

     CSET rd, cond

   This instruction conditionally writes 1 or 0 to the destination
   register: 1 if the condition is true, 0 otherwise.  This is an alias
   for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
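
/* For example, emit_cset (p, x0, EQ) emits "CSINC x0, xzr, xzr, NE"
   (EQ ^ 0x1 == NE), which writes 1 to x0 when the Z flag is set and 0
   otherwise.  */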
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
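
/* The emitters above return instruction counts, so a typical sequence
   builds into a local buffer and flushes it in one go.  A minimal sketch
   (assuming a scratch buffer, as the jump pad builder below does):

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_mov_addr (p, x0, addr);
     p += emit_blr (p, x0);
     append_insns (&to, p - buf, buf);  */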
/* Sub-class of struct aarch64_insn_data, storing information about
   instruction relocation for fast tracepoints.  The visitor can relocate
   an instruction from BASE.INSN_ADDR to NEW_ADDR and saves the relocated
   instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};
/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:
	 */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   CBZ xn, TAKEN   ; If xn == 0, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:
	 */

      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   TBZ xn, #bit, TAKEN   ; If xn[bit] == 0, then jump to TAKEN.
	   B NOT_TAKEN           ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:
	 */

      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1), address);
}

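/* A worked example with an assumed encoding: an ADRP x0, #2 at
   insn_addr 0x400abc arrives here with offset 0x2000 (the page
   immediate already shifted left by 12 by the decoder).  address is
   then 0x402abc, and masking with ~0xfff yields 0x402000, exactly what
   the original ADRP would have computed as (PC & ~0xfff) + (2 << 12).
   emit_mov_addr then materializes that constant with a MOV/MOVK
   sequence.  */
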
/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

  */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}

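/* Note the register reuse above: the literal's absolute address is
   first materialized in RT itself, and the load then overwrites RT
   with the loaded value.  This way no scratch register is needed at
   the relocation site.  */
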
/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};

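/* The visitor is consumed by aarch64_relocate_instruction (see
   arch/aarch64-insn.c): the instruction at the tracepoint is decoded
   once and exactly one of the callbacks above is invoked with the
   decoded fields.  A sketch of the call site, as used further down in
   install_fast_tracepoint_jump_pad:

     struct aarch64_insn_relocation_data insn_data;
     insn_data.base.insn_addr = tpaddr;     // old location
     insn_data.new_addr = buildaddr;        // jump pad location
     insn_data.insn_ptr = buf;              // output buffer
     aarch64_relocate_instruction (insn, &visitor,
                                   (struct aarch64_insn_data *) &insn_data);
     // insn_data.insn_ptr left unchanged => not relocatable.
*/
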
bool
aarch64_target::supports_fast_tracepoints ()
{
  return true;
}

/* Implementation of target ops method
   "install_fast_tracepoint_jump_pad".  */

int
aarch64_target::install_fast_tracepoint_jump_pad
  (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
   CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
   CORE_ADDR *trampoline, ULONGEST *trampoline_size,
   unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
   CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
   char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
          | q31                                                  |
          .                                                      .
          .                                                      . 32 cells
          .                                                      .
          | q0                                                   |
          *---- General purpose registers from 30 down to 0. ----*
          | x30                                                  |
          .                                                      .
          .                                                      . 31 cells
          .                                                      .
          | x0                                                   |
          *------------- Special purpose registers. -------------*
          | SP                                                   |
          | PC                                                   |
          | CPSR (NZCV)                                          | 5 cells
          | FPSR                                                 |
          | FPCR                                                 | <- SP + 16
          *------------- collecting_t object --------------------*
          | TPIDR_EL0                | struct tracepoint *       |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
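  /* For a concrete size check of the frame described above: 32 SIMD&FP
     cells + 31 general purpose cells + 5 special purpose cells, each
     16 bytes, plus the 16-byte collecting_t object, give
     (32 + 31 + 5) * 16 + 16 = 1104 bytes pushed in total, and every
     intermediate SP value stays 16-byte aligned as required.  */
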
  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

     */
  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       MOV x1, sp

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
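  /* Branch offsets in the lock sequence, counted in bytes from each
     CBNZ itself (w2 and x2 name the same register at different
     widths):

       +0   SEVL
       +4   WFE                 <- again
       +8   LDAXR x2, [x0]
       +12  CBNZ w2, again      ; -2 * 4 = -8 bytes back to the WFE
       +16  STXR w2, x1, [x0]
       +20  CBNZ x2, again      ; -4 * 4 = -16 bytes back to the WFE
  */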
  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data reads and writes are
       ; done before this instruction is executed.  Furthermore, this
       ; instruction will trigger an event, letting other threads know
       ; they can grab the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));
  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));
  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));
  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

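/* The resulting jump pad layout, in emission order:

     jump_entry:    save SIMD&FP, general purpose and special registers;
                    build the collecting_t object and take the spin lock;
                    call collector (tpoint, regs);
                    release the lock and restore all registers
     adjusted_insn_addr:
                    relocated copy of the instruction at tpaddr
     adjusted_insn_addr_end:
                    B back to tpaddr + orig_size

   while the caller overwrites the original instruction with the single
   B into the pad returned in jjump_pad_insn.  */
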
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  threads_debug_printf ("Adding %d instructions at %s",
                        len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}

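/* Each stack slot is a full 16 bytes even though only 8 are used, so
   that SP stays 16-byte aligned at all times, as noted in the jump pad
   comment above.  For example, emit_push (buf, x0) assembles to
   STR x0, [sp, #-16]! and emit_pop (buf, x0) to LDR x0, [sp], #16.  */
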
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will be
     set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}

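/* With the prologue above, and fp = sp + 16 at its end, the saved
   values sit at fixed offsets from the frame pointer no matter how the
   stack grows afterwards:

     fp + 8  : saved LR
     fp + 0  : saved FP
     fp - 8  : x1, the ULONGEST *value out-parameter
     fp - 16 : x0, the unsigned char *regs buffer

   which is what aarch64_emit_epilogue (fp - 8) and aarch64_emit_reg
   (fp - 16) rely on below.  */
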
/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

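/* The CMP/CSET pair implements the full logical-not truth table in two
   instructions, for example:

     x0 == 0  ->  Z set    ->  CSET x0, EQ writes 1
     x0 == 7  ->  Z clear  ->  CSET x0, EQ writes 0
*/
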
/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

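/* How the placeholder gets patched: the bytecode compiler records
   (*offset_p, *size_p) for the forward reference, and once the target
   of the goto is known it calls aarch64_write_goto_address (defined
   below) on the recorded address, replacing the NOP with a real B.  A
   sketch, with made-up names and addresses:

     int offset, size;
     aarch64_emit_if_goto (&offset, &size);   // NOP at pad_start + offset
     // ... more opcodes ...
     aarch64_write_goto_address (pad_start + offset, target_addr, size);
*/
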
/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

       MOV x1, x0
       MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}

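/* x0 doubles as the expression evaluator's top-of-stack register, so
   it must survive the call: it is spilled with
   aarch64_emit_stack_flush before the argument registers are set up
   and reloaded with aarch64_emit_pop afterwards, since x0 is a
   caller-saved argument/result register under the AAPCS64 calling
   convention and the callee is free to clobber it.  */
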
/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};

/* Implementation of target ops method "emit_ops".  */

emit_ops *
aarch64_target::emit_ops ()
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of target ops method
   "get_min_fast_tracepoint_insn_len".  */

int
aarch64_target::get_min_fast_tracepoint_insn_len ()
{
  return 4;
}

/* Implementation of linux target ops method "low_supports_range_stepping".  */

bool
aarch64_target::low_supports_range_stepping ()
{
  return true;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Returns true if memory tagging is supported.  */

bool
aarch64_target::supports_memory_tagging ()
{
  if (current_thread == NULL)
    {
      /* We don't have any processes running, so don't attempt to
         use linux_get_hwcap2 as it will try to fetch the current
         thread id.  Instead, just fetch the auxv from the self
         PID.  */
#ifdef HAVE_GETAUXVAL
      return (getauxval (AT_HWCAP2) & HWCAP2_MTE) != 0;
#else
      return true;
#endif
    }

  return (linux_get_hwcap2 (8) & HWCAP2_MTE) != 0;
}

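/* A minimal standalone version of the same probe, for illustration
   (assumes glibc's getauxval and the HWCAP2_MTE bit from the arm64
   kernel headers):

     #include <sys/auxv.h>
     #include <asm/hwcap.h>

     bool have_mte = (getauxval (AT_HWCAP2) & HWCAP2_MTE) != 0;

   The linux_get_hwcap2 (8) call used for a live inferior reads the
   same AT_HWCAP2 entry out of the inferior's auxv; the 8 is assumed
   here to be the word size in bytes of an AArch64 auxv entry.  */
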
bool
aarch64_target::fetch_memtags (CORE_ADDR address, size_t len,
                               gdb::byte_vector &tags, int type)
{
  /* Allocation tags are per-process, so any tid is fine.  */
  int tid = lwpid_of (current_thread);

  /* Allocation tag?  */
  if (type == static_cast <int> (aarch64_memtag_type::mte_allocation))
    return aarch64_mte_fetch_memtags (tid, address, len, tags);

  return false;
}

bool
aarch64_target::store_memtags (CORE_ADDR address, size_t len,
                               const gdb::byte_vector &tags, int type)
{
  /* Allocation tags are per-process, so any tid is fine.  */
  int tid = lwpid_of (current_thread);

  /* Allocation tag?  */
  if (type == static_cast <int> (aarch64_memtag_type::mte_allocation))
    return aarch64_mte_store_memtags (tid, address, len, tags);

  return false;
}

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
}