/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2024 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "tracepoint.h"

#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "arch/aarch64-mte-linux.h"
#include "arch/aarch64-scalable-linux.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-mte-linux-ptrace.h"
#include "nat/aarch64-scalable-linux-ptrace.h"
/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  bool supports_memory_tagging () override;

  bool fetch_memtags (CORE_ADDR address, size_t len,
		      gdb::byte_vector &tags, int type) override;

  bool store_memtags (CORE_ADDR address, size_t len,
		      const gdb::byte_vector &tags, int type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
};
/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;
bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}
bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}
void
aarch64_target::low_prepare_to_resume (lwp_info *lwp)
{
  aarch64_linux_prepare_to_resume (lwp);
}
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};
/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  /* We may not have a current thread at this point, so go straight to
     the process's target description.  */
  return register_size (current_process ()->tdesc, 0) == 8;
}
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
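
/* Illustrative sketch only -- EXAMPLE_GREGSET_ROUND_TRIP is a
   hypothetical helper, not used by gdbserver.  It shows the regset
   convention followed by the pair above: "fill" copies regcache ->
   kernel buffer (before PTRACE_SETREGSET), "store" copies a
   PTRACE_GETREGSET result back into the regcache.  */

ATTRIBUTE_UNUSED static void
example_gregset_round_trip (struct regcache *regcache)
{
  struct user_pt_regs regs;

  /* regcache -> buffer, as done before writing the regset.  */
  aarch64_fill_gregset (regcache, &regs);

  /* Tweak the buffer, e.g. advance the PC past one 4-byte insn.  */
  regs.pc += 4;

  /* buffer -> regcache, as done after reading the regset back.  */
  aarch64_store_gregset (regcache, &regs);
}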
static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   pauth_regset);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   pauth_regset + 1);
}
/* Fill BUF with the MTE registers from the regcache.  */

static void
aarch64_fill_mteregset (struct regcache *regcache, void *buf)
{
  uint64_t *mte_regset = (uint64_t *) buf;
  int mte_base = find_regno (regcache->tdesc, "tag_ctl");

  collect_register (regcache, mte_base, mte_regset);
}
/* Store the MTE registers to regcache.  */

static void
aarch64_store_mteregset (struct regcache *regcache, const void *buf)
{
  uint64_t *mte_regset = (uint64_t *) buf;
  int mte_base = find_regno (regcache->tdesc, "tag_ctl");

  /* Tag Control register.  */
  supply_register (regcache, mte_base, mte_regset);
}
/* Fill BUF with the TLS register(s) from the regcache.  */

static void
aarch64_fill_tlsregset (struct regcache *regcache, void *buf)
{
  gdb_byte *tls_buf = (gdb_byte *) buf;
  int tls_regnum = find_regno (regcache->tdesc, "tpidr");

  collect_register (regcache, tls_regnum, tls_buf);

  /* Read TPIDR2, if it exists.  */
  std::optional<int> regnum = find_regno_no_throw (regcache->tdesc, "tpidr2");

  if (regnum.has_value ())
    collect_register (regcache, *regnum, tls_buf + sizeof (uint64_t));
}
/* Store the TLS register(s) to the regcache.  */

static void
aarch64_store_tlsregset (struct regcache *regcache, const void *buf)
{
  gdb_byte *tls_buf = (gdb_byte *) buf;
  int tls_regnum = find_regno (regcache->tdesc, "tpidr");

  supply_register (regcache, tls_regnum, tls_buf);

  /* Write TPIDR2, if it exists.  */
  std::optional<int> regnum = find_regno_no_throw (regcache->tdesc, "tpidr2");

  if (regnum.has_value ())
    supply_register (regcache, *regnum, tls_buf + sizeof (uint64_t));
}
bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}
330 /* Implementation of linux target ops method "low_get_pc". */
333 aarch64_target::low_get_pc (regcache
*regcache
)
335 if (register_size (regcache
->tdesc
, 0) == 8)
336 return linux_get_pc_64bit (regcache
);
338 return linux_get_pc_32bit (regcache
);
341 /* Implementation of linux target ops method "low_set_pc". */
344 aarch64_target::low_set_pc (regcache
*regcache
, CORE_ADDR pc
)
346 if (register_size (regcache
->tdesc
, 0) == 8)
347 linux_set_pc_64bit (regcache
, pc
);
349 linux_set_pc_32bit (regcache
, pc
);
#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
359 /* Implementation of linux target ops method "low_breakpoint_at". */
362 aarch64_target::low_breakpoint_at (CORE_ADDR where
)
364 if (is_64bit_tdesc ())
366 gdb_byte insn
[aarch64_breakpoint_len
];
368 read_memory (where
, (unsigned char *) &insn
, aarch64_breakpoint_len
);
369 if (memcmp (insn
, aarch64_breakpoint
, aarch64_breakpoint_len
) == 0)
375 return arm_breakpoint_at (where
);
static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}
/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
409 /* Implementation of target ops method "supports_z_point_type". */
412 aarch64_target::supports_z_point_type (char z_type
)
418 case Z_PACKET_WRITE_WP
:
419 case Z_PACKET_READ_WP
:
420 case Z_PACKET_ACCESS_WP
:
427 /* Implementation of linux target ops method "low_insert_point".
429 It actually only records the info of the to-be-inserted bp/wp;
430 the actual insertion will happen when threads are resumed. */
433 aarch64_target::low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
434 int len
, raw_breakpoint
*bp
)
437 enum target_hw_bp_type targ_type
;
438 struct aarch64_debug_reg_state
*state
439 = aarch64_get_debug_reg_state (pid_of (current_thread
));
442 fprintf (stderr
, "insert_point on entry (addr=0x%08lx, len=%d)\n",
443 (unsigned long) addr
, len
);
445 /* Determine the type from the raw breakpoint type. */
446 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
448 if (targ_type
!= hw_execute
)
450 if (aarch64_region_ok_for_watchpoint (addr
, len
))
451 ret
= aarch64_handle_watchpoint (targ_type
, addr
, len
,
453 current_lwp_ptid (), state
);
461 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
462 instruction. Set it to 2 to correctly encode length bit
463 mask in hardware/watchpoint control register. */
466 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
467 1 /* is_insert */, current_lwp_ptid (),
472 aarch64_show_debug_reg_state (state
, "insert_point", addr
, len
,
478 /* Implementation of linux target ops method "low_remove_point".
480 It actually only records the info of the to-be-removed bp/wp,
481 the actual removal will be done when threads are resumed. */
484 aarch64_target::low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
485 int len
, raw_breakpoint
*bp
)
488 enum target_hw_bp_type targ_type
;
489 struct aarch64_debug_reg_state
*state
490 = aarch64_get_debug_reg_state (pid_of (current_thread
));
493 fprintf (stderr
, "remove_point on entry (addr=0x%08lx, len=%d)\n",
494 (unsigned long) addr
, len
);
496 /* Determine the type from the raw breakpoint type. */
497 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
499 /* Set up state pointers. */
500 if (targ_type
!= hw_execute
)
502 aarch64_handle_watchpoint (targ_type
, addr
, len
, 0 /* is_insert */,
503 current_lwp_ptid (), state
);
508 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
509 instruction. Set it to 2 to correctly encode length bit
510 mask in hardware/watchpoint control register. */
513 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
514 0 /* is_insert */, current_lwp_ptid (),
519 aarch64_show_debug_reg_state (state
, "remove_point", addr
, len
,
static CORE_ADDR
aarch64_remove_non_address_bits (CORE_ADDR pointer)
{
  /* By default, we assume TBI and discard the top 8 bits plus the
     VA range select bit (55).  */
  CORE_ADDR mask = AARCH64_TOP_BITS_MASK;

  /* Check if PAC is available for this target.  */
  if (tdesc_contains_feature (current_process ()->tdesc,
			      "org.gnu.gdb.aarch64.pauth"))
    {
      /* Fetch the PAC masks.  These masks are per-process, so we can just
	 fetch data from whatever thread we have at the moment.

	 Also, we have both a code mask and a data mask.  For now they are
	 the same, but this may change in the future.  */

      struct regcache *regs = get_thread_regcache (current_thread, 1);
      CORE_ADDR dmask = regcache_raw_get_unsigned_by_name (regs,
							   "pauth_dmask");
      CORE_ADDR cmask = regcache_raw_get_unsigned_by_name (regs,
							   "pauth_cmask");
      mask |= aarch64_mask_from_pac_registers (cmask, dmask);
    }

  return aarch64_remove_top_bits (pointer, mask);
}
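
/* Worked example (assumed values, for illustration only): with TBI and
   no pauth feature, MASK stays AARCH64_TOP_BITS_MASK, so a pointer
   carrying an MTE-style tag in its top byte is reduced back to a plain
   userspace address:

     pointer = 0x2a00ffffb7fedcba   ; tag 0x2a in bits 63..56
     result  = 0x0000ffffb7fedcba   ; usable as a memory address again

   When the pauth feature is present, the PAC mask bits fetched above
   widen MASK so signature bits are discarded as well.  */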
551 /* Implementation of linux target ops method "low_stopped_data_address". */
554 aarch64_target::low_stopped_data_address ()
558 struct aarch64_debug_reg_state
*state
;
560 pid
= lwpid_of (current_thread
);
562 /* Get the siginfo. */
563 if (ptrace (PTRACE_GETSIGINFO
, pid
, NULL
, &siginfo
) != 0)
564 return (CORE_ADDR
) 0;
566 /* Need to be a hardware breakpoint/watchpoint trap. */
567 if (siginfo
.si_signo
!= SIGTRAP
568 || (siginfo
.si_code
& 0xffff) != 0x0004 /* TRAP_HWBKPT */)
569 return (CORE_ADDR
) 0;
571 /* Make sure to ignore the top byte, otherwise we may not recognize a
572 hardware watchpoint hit. The stopped data addresses coming from the
573 kernel can potentially be tagged addresses. */
574 const CORE_ADDR addr_trap
575 = aarch64_remove_non_address_bits ((CORE_ADDR
) siginfo
.si_addr
);
577 /* Check if the address matches any watched address. */
578 state
= aarch64_get_debug_reg_state (pid_of (current_thread
));
580 if (aarch64_stopped_data_address (state
, addr_trap
, &result
))
583 return (CORE_ADDR
) 0;
586 /* Implementation of linux target ops method "low_stopped_by_watchpoint". */
589 aarch64_target::low_stopped_by_watchpoint ()
591 return (low_stopped_data_address () != 0);
/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}
604 /* Implementation of linux target ops method "low_siginfo_fixup". */
607 aarch64_target::low_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
,
610 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
611 if (!is_64bit_tdesc ())
614 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
,
617 aarch64_siginfo_from_compat_siginfo (native
,
618 (struct compat_siginfo
*) inf
);
626 /* Implementation of linux target ops method "low_new_process". */
629 aarch64_target::low_new_process ()
631 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
633 aarch64_init_debug_reg_state (&info
->debug_reg_state
);
638 /* Implementation of linux target ops method "low_delete_process". */
641 aarch64_target::low_delete_process (arch_process_info
*info
)
void
aarch64_target::low_new_thread (lwp_info *lwp)
{
  aarch64_linux_new_thread (lwp);
}
void
aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
{
  aarch64_linux_delete_thread (arch_lwp);
}
658 /* Implementation of linux target ops method "low_new_fork". */
661 aarch64_target::low_new_fork (process_info
*parent
,
664 /* These are allocated by linux_add_process. */
665 gdb_assert (parent
->priv
!= NULL
666 && parent
->priv
->arch_private
!= NULL
);
667 gdb_assert (child
->priv
!= NULL
668 && child
->priv
->arch_private
!= NULL
);
670 /* Linux kernel before 2.6.33 commit
671 72f674d203cd230426437cdcf7dd6f681dad8b0d
672 will inherit hardware debug registers from parent
673 on fork/vfork/clone. Newer Linux kernels create such tasks with
674 zeroed debug registers.
676 GDB core assumes the child inherits the watchpoints/hw
677 breakpoints of the parent, and will remove them all from the
678 forked off process. Copy the debug registers mirrors into the
679 new process so that all breakpoints and watchpoints can be
680 removed together. The debug registers mirror will become zeroed
681 in the end before detaching the forked off process, thus making
682 this compatible with older Linux kernels too. */
684 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache,
				   ATTRIBUTE_UNUSED const void *buf)
{
  /* BUF is unused here since we collect the data straight from a ptrace
     request in aarch64_sve_regs_copy_to_reg_buf, therefore bypassing
     gdbserver's own call to ptrace.  */

  int tid = lwpid_of (current_thread);

  /* Update the register cache.  aarch64_sve_regs_copy_to_reg_buf handles
     fetching the NT_ARM_SVE state from thread TID.  */
  aarch64_sve_regs_copy_to_reg_buf (tid, regcache);
}
/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  int tid = lwpid_of (current_thread);

  /* Update the thread SVE state.  aarch64_sve_regs_copy_from_reg_buf
     handles writing the SVE/FPSIMD state back to thread TID.  */
  aarch64_sve_regs_copy_from_reg_buf (tid, regcache);

  /* We need to return the expected data in BUF, so copy whatever the
     kernel already has to BUF.  */
  gdb::byte_vector sve_state = aarch64_fetch_sve_regset (tid);
  memcpy (buf, sve_state.data (), sve_state.size ());
}
/* Wrapper for aarch64_za_regs_copy_to_reg_buf, to help copying NT_ARM_ZA
   state from the thread (BUF) to the register cache.  */

static void
aarch64_za_regs_copy_to_regcache (struct regcache *regcache,
				  ATTRIBUTE_UNUSED const void *buf)
{
  /* BUF is unused here since we collect the data straight from a ptrace
     request, therefore bypassing gdbserver's own call to ptrace.  */
  int tid = lwpid_of (current_thread);

  int za_regnum = find_regno (regcache->tdesc, "za");
  int svg_regnum = find_regno (regcache->tdesc, "svg");
  int svcr_regnum = find_regno (regcache->tdesc, "svcr");

  /* Update the register cache.  aarch64_za_regs_copy_to_reg_buf handles
     fetching the NT_ARM_ZA state from thread TID.  */
  aarch64_za_regs_copy_to_reg_buf (tid, regcache, za_regnum, svg_regnum,
				   svcr_regnum);
}
/* Wrapper for aarch64_za_regs_copy_from_reg_buf, to help copying NT_ARM_ZA
   state from the register cache to the thread (BUF).  */

static void
aarch64_za_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  int tid = lwpid_of (current_thread);

  int za_regnum = find_regno (regcache->tdesc, "za");
  int svg_regnum = find_regno (regcache->tdesc, "svg");
  int svcr_regnum = find_regno (regcache->tdesc, "svcr");

  /* Update the thread NT_ARM_ZA state.  aarch64_za_regs_copy_from_reg_buf
     handles writing the ZA state back to thread TID.  */
  aarch64_za_regs_copy_from_reg_buf (tid, regcache, za_regnum, svg_regnum,
				     svcr_regnum);

  /* We need to return the expected data in BUF, so copy whatever the
     kernel already has to BUF.  */

  /* Obtain a dump of ZA from ptrace.  */
  gdb::byte_vector za_state = aarch64_fetch_za_regset (tid);
  memcpy (buf, za_state.data (), za_state.size ());
}
/* Wrapper for aarch64_zt_regs_copy_to_reg_buf, to help copying NT_ARM_ZT
   state from the thread (BUF) to the register cache.  */

static void
aarch64_zt_regs_copy_to_regcache (struct regcache *regcache,
				  ATTRIBUTE_UNUSED const void *buf)
{
  /* BUF is unused here since we collect the data straight from a ptrace
     request, therefore bypassing gdbserver's own call to ptrace.  */
  int tid = lwpid_of (current_thread);

  int zt_regnum = find_regno (regcache->tdesc, "zt0");

  /* Update the register cache.  aarch64_zt_regs_copy_to_reg_buf handles
     fetching the NT_ARM_ZT state from thread TID.  */
  aarch64_zt_regs_copy_to_reg_buf (tid, regcache, zt_regnum);
}
/* Wrapper for aarch64_zt_regs_copy_from_reg_buf, to help copying NT_ARM_ZT
   state from the register cache to the thread (BUF).  */

static void
aarch64_zt_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  int tid = lwpid_of (current_thread);

  int zt_regnum = find_regno (regcache->tdesc, "zt0");

  /* Update the thread NT_ARM_ZT state.  aarch64_zt_regs_copy_from_reg_buf
     handles writing the ZT state back to thread TID.  */
  aarch64_zt_regs_copy_from_reg_buf (tid, regcache, zt_regnum);

  /* We need to return the expected data in BUF, so copy whatever the
     kernel already has to BUF.  */

  /* Obtain a dump of NT_ARM_ZT from ptrace.  */
  gdb::byte_vector zt_state = aarch64_fetch_zt_regset (tid);
  memcpy (buf, zt_state.data (), zt_state.size ());
}
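
/* Illustrative sketch only -- EXAMPLE_ZT_REGSET_FLOW is a hypothetical
   helper, not used by gdbserver.  All three scalable register sets
   above (SVE, ZA, ZT) follow the same shape: the "copy_to_regcache"
   side ignores BUF and re-fetches the state with its own ptrace
   request, while the "copy_from_regcache" side writes the thread state
   and then refills BUF so the generic regset code still sees
   consistent data.  */

ATTRIBUTE_UNUSED static void
example_zt_regset_flow (struct regcache *regcache, void *buf)
{
  /* Read path: BUF contents are irrelevant; state comes via ptrace.  */
  aarch64_zt_regs_copy_to_regcache (regcache, buf);

  /* Write path: thread state is updated, then BUF is refreshed.  */
  aarch64_zt_regs_copy_from_regcache (regcache, buf);
}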
/* Array containing all the possible register sets for AArch64/Linux.
   During architecture setup, these will be checked against the
   HWCAP/HWCAP2 bits for validity and enabled/disabled accordingly.

   Their sizes are set to 0 here, but they will be adjusted later depending
   on whether each register set is available or not.  */
static struct regset_info aarch64_regsets[] =
{
  /* GPR registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    0, GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  /* Floating Point (FPU) registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    0, FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset },
  /* Scalable Vector Extension (SVE) registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    0, EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache },
  /* Scalable Matrix Extension (SME) ZA register.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_ZA,
    0, EXTENDED_REGS,
    aarch64_za_regs_copy_from_regcache, aarch64_za_regs_copy_to_regcache },
  /* Scalable Matrix Extension 2 (SME2) ZT registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_ZT,
    0, EXTENDED_REGS,
    aarch64_zt_regs_copy_from_regcache, aarch64_zt_regs_copy_to_regcache },
  /* Pointer authentication (PAC) mask registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    0, OPTIONAL_REGS,
    nullptr, aarch64_store_pauthregset },
  /* Tagged address control / MTE registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_TAGGED_ADDR_CTRL,
    0, OPTIONAL_REGS,
    aarch64_fill_mteregset, aarch64_store_mteregset },
  /* TLS register(s).  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_TLS,
    0, OPTIONAL_REGS,
    aarch64_fill_tlsregset, aarch64_store_tlsregset },
  NULL_REGSET
};
static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    nullptr, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    nullptr, /* regset_bitmap */
    nullptr, /* usrregs */
    &aarch64_regsets_info,
  };
/* Given FEATURES, adjust the available register sets by setting their
   sizes.  A size of 0 means the register set is disabled and won't be
   fetched/stored.  */

static void
aarch64_adjust_register_sets (const struct aarch64_features &features)
{
  struct regset_info *regset;

  for (regset = aarch64_regsets; regset->size >= 0; regset++)
    {
      switch (regset->nt_type)
	{
	case NT_PRSTATUS:
	  /* General purpose registers are always present.  */
	  regset->size = sizeof (struct user_pt_regs);
	  break;
	case NT_FPREGSET:
	  /* This is unavailable when SVE is present.  */
	  if (features.vq == 0)
	    regset->size = sizeof (struct user_fpsimd_state);
	  break;
	case NT_ARM_SVE:
	  if (features.vq > 0)
	    regset->size = SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE);
	  break;
	case NT_ARM_PAC_MASK:
	  if (features.pauth)
	    regset->size = AARCH64_PAUTH_REGS_SIZE;
	  break;
	case NT_ARM_TAGGED_ADDR_CTRL:
	  if (features.mte)
	    regset->size = AARCH64_LINUX_SIZEOF_MTE;
	  break;
	case NT_ARM_TLS:
	  if (features.tls > 0)
	    regset->size = AARCH64_TLS_REGISTER_SIZE * features.tls;
	  break;
	case NT_ARM_ZA:
	  if (features.svq > 0)
	    regset->size = ZA_PT_SIZE (features.svq);
	  break;
	case NT_ARM_ZT:
	  if (features.sme2)
	    regset->size = AARCH64_SME2_ZT0_SIZE;
	  break;
	default:
	  gdb_assert_not_reached ("Unknown register set found.");
	}
    }
}
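
/* Illustrative sketch only -- EXAMPLE_ADJUST_REGISTER_SETS is a
   hypothetical helper with assumed feature values, not used by
   gdbserver.  It shows how a feature set drives the sizing loop above
   (aarch64_features zero-initializes its members).  */

ATTRIBUTE_UNUSED static void
example_adjust_register_sets ()
{
  struct aarch64_features features;

  features.vq = 4;	/* SVE with 512-bit vectors.  */
  features.mte = true;	/* Enable NT_ARM_TAGGED_ADDR_CTRL.  */
  features.tls = 1;	/* One TLS register (tpidr).  */

  aarch64_adjust_register_sets (features);

  /* Afterwards NT_ARM_SVE is sized for the maximum VQ, NT_FPREGSET
     stays at 0 because SVE supersedes it, and NT_ARM_ZA/NT_ARM_ZT stay
     disabled since svq/sme2 kept their zero defaults.  */
}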
/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)
923 /* Implementation of linux target ops method "low_arch_setup". */
926 aarch64_target::low_arch_setup ()
928 unsigned int machine
;
932 tid
= lwpid_of (current_thread
);
934 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
938 struct aarch64_features features
;
939 int pid
= current_thread
->id
.pid ();
941 features
.vq
= aarch64_sve_get_vq (tid
);
942 /* A-profile PAC is 64-bit only. */
943 features
.pauth
= linux_get_hwcap (pid
, 8) & AARCH64_HWCAP_PACA
;
944 /* A-profile MTE is 64-bit only. */
945 features
.mte
= linux_get_hwcap2 (pid
, 8) & HWCAP2_MTE
;
946 features
.tls
= aarch64_tls_register_count (tid
);
948 /* Scalable Matrix Extension feature and size check. */
949 if (linux_get_hwcap2 (pid
, 8) & HWCAP2_SME
)
950 features
.svq
= aarch64_za_get_svq (tid
);
952 /* Scalable Matrix Extension 2 feature check. */
953 CORE_ADDR hwcap2
= linux_get_hwcap2 (pid
, 8);
954 if ((hwcap2
& HWCAP2_SME2
) || (hwcap2
& HWCAP2_SME2P1
))
956 /* Make sure ptrace supports NT_ARM_ZT. */
957 features
.sme2
= supports_zt_registers (tid
);
960 current_process ()->tdesc
= aarch64_linux_read_description (features
);
962 /* Adjust the register sets we should use for this particular set of
964 aarch64_adjust_register_sets (features
);
967 current_process ()->tdesc
= aarch32_linux_read_description ();
969 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread
));
972 /* Implementation of linux target ops method "get_regs_info". */
975 aarch64_target::get_regs_info ()
977 if (!is_64bit_tdesc ())
978 return ®s_info_aarch32
;
980 /* AArch64 64-bit registers. */
981 return ®s_info_aarch64
;
984 /* Implementation of target ops method "supports_tracepoints". */
987 aarch64_target::supports_tracepoints ()
989 if (current_thread
== NULL
)
993 /* We don't support tracepoints on aarch32 now. */
994 return is_64bit_tdesc ();
998 /* Implementation of linux target ops method "low_get_thread_area". */
1001 aarch64_target::low_get_thread_area (int lwpid
, CORE_ADDR
*addrp
)
1006 iovec
.iov_base
= ®
;
1007 iovec
.iov_len
= sizeof (reg
);
1009 if (ptrace (PTRACE_GETREGSET
, lwpid
, NT_ARM_TLS
, &iovec
) != 0)
bool
aarch64_target::low_supports_catch_syscall ()
{
  return true;
}
1023 /* Implementation of linux target ops method "low_get_syscall_trapinfo". */
1026 aarch64_target::low_get_syscall_trapinfo (regcache
*regcache
, int *sysno
)
1028 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
1034 collect_register_by_name (regcache
, "x8", &l_sysno
);
1035 *sysno
= (int) l_sysno
;
1038 collect_register_by_name (regcache
, "r7", sysno
);
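
/* For example (illustrative values): on AArch64 the Linux syscall
   number travels in x8, so a thread stopped at "svc #0" with x8 == 93
   reports *SYSNO == 93 (exit).  A 32-bit (AArch32) inferior carries
   the same information in r7 instead.  */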
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};
/* List of registers that we are currently using; we can add more here
   as we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };
/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}
/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}
/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}
/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}
/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}
/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
/* Write a BLR instruction into *BUF.

     BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}
/* Write a RET instruction into *BUF.

     RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
/* Write a STP instruction into *BUF.

     STP rt, rt2, [rn, #offset]
     STP rt, rt2, [rn, #index]!
     STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}
/* Write a LDP instruction into *BUF.

     LDP rt, rt2, [rn, #offset]
     LDP rt, rt2, [rn, #index]!
     LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}
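
/* Illustrative sketch only -- EXAMPLE_PUSH_POP_PAIR is a hypothetical
   helper, not used by gdbserver.  Assuming P points at a scratch
   buffer, it emits the classic save/restore idiom built from the
   emitters above:

     STP x0, x1, [sp, #-16]!   ; push x0/x1
     LDP x0, x1, [sp], #16     ; pop them back  */

ATTRIBUTE_UNUSED static int
example_push_pop_pair (uint32_t *p)
{
  uint32_t *start = p;

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
  p += emit_ldp (p, x0, x1, sp, postindex_memory_operand (16));

  /* Number of instructions emitted.  */
  return p - start;
}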
1302 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
1304 LDP qt, qt2, [rn, #offset]
1306 RT and RT2 are the Q registers to store.
1307 RN is the base address register.
1308 OFFSET is the immediate to add to the base address. It is limited to
1309 -1024 .. 1008 range (7 bits << 4). */
1312 emit_ldp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1313 struct aarch64_register rn
, int32_t offset
)
1315 uint32_t opc
= ENCODE (2, 2, 30);
1316 uint32_t pre_index
= ENCODE (1, 1, 24);
1318 return aarch64_emit_insn (buf
, LDP_SIMD_VFP
| opc
| pre_index
1319 | ENCODE (offset
>> 4, 7, 15)
1320 | ENCODE (rt2
, 5, 10)
1321 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1324 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
1326 STP qt, qt2, [rn, #offset]
1328 RT and RT2 are the Q registers to store.
1329 RN is the base address register.
1330 OFFSET is the immediate to add to the base address. It is limited to
1331 -1024 .. 1008 range (7 bits << 4). */
1334 emit_stp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1335 struct aarch64_register rn
, int32_t offset
)
1337 uint32_t opc
= ENCODE (2, 2, 30);
1338 uint32_t pre_index
= ENCODE (1, 1, 24);
1340 return aarch64_emit_insn (buf
, STP_SIMD_VFP
| opc
| pre_index
1341 | ENCODE (offset
>> 4, 7, 15)
1342 | ENCODE (rt2
, 5, 10)
1343 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
/* Write a LDRH instruction into *BUF.

     LDRH wt, [xn, #offset]
     LDRH wt, [xn, #index]!
     LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}
/* Write a LDRB instruction into *BUF.

     LDRB wt, [xn, #offset]
     LDRB wt, [xn, #index]!
     LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}
/* Write a STR instruction into *BUF.

     STR rt, [rn, #offset]
     STR rt, [rn, #index]!
     STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}
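
/* Illustrative sketch only -- EXAMPLE_STR_ADDRESSING_FORMS is a
   hypothetical helper, not used by gdbserver.  It pairs the memory
   operand helpers with emit_str to produce the three AArch64
   addressing forms:

     STR x0, [sp, #16]     ; offset
     STR x0, [sp, #-16]!   ; pre-index (push-like)
     STR x0, [sp], #16     ; post-index (pop-like)  */

ATTRIBUTE_UNUSED static int
example_str_addressing_forms (uint32_t *p)
{
  uint32_t *start = p;

  p += emit_str (p, x0, sp, offset_memory_operand (16));
  p += emit_str (p, x0, sp, preindex_memory_operand (-16));
  p += emit_str (p, x0, sp, postindex_memory_operand (16));

  /* Number of instructions emitted.  */
  return p - start;
}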
/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
/* Write a LDAXR instruction into *BUF.

     LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}
/* Write a STXR instruction into *BUF.

     STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}
/* Write a STLR instruction into *BUF.

     STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}
/* Helper function for data processing instructions taking either a register
   or an immediate source operand.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}
/* Write an ADD instruction into *BUF.

     ADD rd, rn, #imm
     ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}
/* Write a SUB instruction into *BUF.

     SUB rd, rn, #imm
     SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}
/* Write a MOV instruction into *BUF.

     MOV rd, #imm
     MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}
/* Write a MOVK instruction into *BUF.

     MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
			    | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}
/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
/* Write a SUBS instruction into *BUF.

     SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}
/* Write a CMP instruction into *BUF.

     CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}
/* Write an AND instruction into *BUF.

     AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}
/* Write an ORR instruction into *BUF.

     ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}
/* Write an ORN instruction into *BUF.

     ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}
/* Write an EOR instruction into *BUF.

     EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}
/* Write a MVN instruction into *BUF.

     MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}
/* Write a LSLV instruction into *BUF.

     LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}
/* Write a LSRV instruction into *BUF.

     LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}
/* Write an ASRV instruction into *BUF.

     ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}
/* Write a MUL instruction into *BUF.

     MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}
/* Write a MRS instruction into *BUF.  The register size is 64-bit.

     MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
/* Write a MSR instruction into *BUF.  The register size is 64-bit.

     MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}
/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}
/* Write a SBFM instruction into *BUF.

     SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a SBFX instruction into *BUF.

     SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a UBFM instruction into *BUF.

     UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a UBFX instruction into *BUF.

     UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a CSINC instruction into *BUF.

     CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a CSET instruction into *BUF.

     CSET rd, cond

   This instruction conditionally writes 1 or 0 to the destination register:
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
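
/* Worked example: emit_cset (buf, x0, EQ) emits CSINC x0, xzr, xzr, NE,
   i.e. x0 = (Z flag set) ? 1 : 0.  EQ (0x0) and NE (0x1) differ only in
   the low condition bit, hence the ^ 0x1 above.  */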
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
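
/* Illustrative sketch only -- EXAMPLE_APPEND_INSNS is a hypothetical
   helper, not used by gdbserver.  append_insns both writes the
   buffered instructions into the inferior and advances the write
   cursor, so sequential emits land back to back.  */

ATTRIBUTE_UNUSED static void
example_append_insns (CORE_ADDR where)
{
  uint32_t buf[2];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (1));
  p += emit_ret (p, lr);

  /* Writes 8 bytes at WHERE and leaves WHERE advanced by 8.  */
  append_insns (&where, p - buf, buf);
}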
/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};
2020 /* Implementation of aarch64_insn_visitor method "b". */
2023 aarch64_ftrace_insn_reloc_b (const int is_bl
, const int32_t offset
,
2024 struct aarch64_insn_data
*data
)
2026 struct aarch64_insn_relocation_data
*insn_reloc
2027 = (struct aarch64_insn_relocation_data
*) data
;
2029 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
2031 if (can_encode_int32 (new_offset
, 28))
2032 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, is_bl
, new_offset
);
2035 /* Implementation of aarch64_insn_visitor method "b_cond". */
2038 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond
, const int32_t offset
,
2039 struct aarch64_insn_data
*data
)
2041 struct aarch64_insn_relocation_data
*insn_reloc
2042 = (struct aarch64_insn_relocation_data
*) data
;
2044 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
2046 if (can_encode_int32 (new_offset
, 21))
2048 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
,
2051 else if (can_encode_int32 (new_offset
, 28))
2053 /* The offset is out of range for a conditional branch
2054 instruction but not for a unconditional branch. We can use
2055 the following instructions instead:
2057 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2058 B NOT_TAKEN ; Else jump over TAKEN and continue.
2065 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
, 8);
2066 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
2067 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
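
/* Worked example (illustrative numbers): a B.NE at 0x400000 with
   offset +0x100, relocated to a jump pad at 0x500000, gets
   new_offset = 0x400000 - 0x500000 + 0x100 = -0xfff00.  That still
   fits in 21 bits, so a single B.NE suffices; only displacements
   beyond +/-1 MiB need the B.COND/B trampoline above.  */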
2071 /* Implementation of aarch64_insn_visitor method "cb". */
2074 aarch64_ftrace_insn_reloc_cb (const int32_t offset
, const int is_cbnz
,
2075 const unsigned rn
, int is64
,
2076 struct aarch64_insn_data
*data
)
2078 struct aarch64_insn_relocation_data
*insn_reloc
2079 = (struct aarch64_insn_relocation_data
*) data
;
2081 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
2083 if (can_encode_int32 (new_offset
, 21))
2085 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
2086 aarch64_register (rn
, is64
), new_offset
);
2088 else if (can_encode_int32 (new_offset
, 28))
2090 /* The offset is out of range for a compare and branch
2091 instruction but not for a unconditional branch. We can use
2092 the following instructions instead:
2094 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2095 B NOT_TAKEN ; Else jump over TAKEN and continue.
2101 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
2102 aarch64_register (rn
, is64
), 8);
2103 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
2104 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

         TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
         B NOT_TAKEN         ; Else jump over TAKEN and continue.
         TAKEN:
         B #(offset - 8)
         NOT_TAKEN:

         */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}
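
/* TBZ/TBNZ encode only a 14-bit immediate scaled by 4 (+/-32 KiB),
   hence the smaller 16-bit byte-offset check above.  */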
/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1),
                                           address);
}
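
/* ADRP produces the 4 KiB page base of a PC-relative target, so only
   the page-aligned part of ADDRESS is materialized in that case; the
   original code's following ADD/LDR supplies the low 12 bits.  */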
/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

     */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}
/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
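
/* The initializer order above must match the member order of struct
   aarch64_insn_visitor (declared in arch/aarch64-insn.h): b, b_cond,
   cb, tb, adr, ldr_literal, others.  */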
bool
aarch64_target::supports_fast_tracepoints ()
{
  return true;
}

/* Implementation of target ops method
   "install_fast_tracepoint_jump_pad".  */

int
aarch64_target::install_fast_tracepoint_jump_pad
  (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
   CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
   CORE_ADDR *trampoline, ULONGEST *trampoline_size,
   unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
   CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
   char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  uint32_t insn;
  int i;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
          | q31                                                  |
          .                                                      .
          .                                                      . 32 cells
          .                                                      .
          | q0                                                   |
          *---- General purpose registers from 30 down to 0. ----*
          | x30                                                  |
          .                                                      .
          .                                                      . 31 cells
          .                                                      .
          | x0                                                   |
          *------------- Special purpose registers. -------------*
          | SP                                                   |
          | PC                                                   |
          | CPSR (NZCV)                                          | 5 cells
          | FPSR                                                 |
          | FPCR                                                 | <- SP + 16
          *------------- collecting_t object --------------------*
          | TPIDR_EL0                   | struct tracepoint *    |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
  /* Save PC (tracepoint address):

       MOV  x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

     */
  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
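
  /* The two CBNZ offsets above are byte displacements from the branch
     itself: -2 * 4 skips back two 4-byte instructions (from the first
     CBNZ to the WFE), and -4 * 4 skips back four (from the second CBNZ
     to the same WFE), implementing the "again" label.  */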
  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);
  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data reads and writes are
       ; done before this instruction is executed.  Furthermore, this
       ; instruction will trigger an event, letting other threads know
       ; they can grab the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));
  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);
  p += emit_add (p, sp, sp, immediate_operand (5 * 16));
  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));
  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);
  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    {
      append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
      *adjusted_insn_addr_end = buildaddr;
    }
  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
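
/* Both 28-bit checks above reflect the +/-128 MiB range of a single B
   instruction: the jump pad must be allocated close enough to the
   tracepoint address for one unconditional branch in each direction.  */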
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  threads_debug_printf ("Adding %d instructions at %s",
                        len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
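
/* Each stack cell is 16 bytes even though only 8 are used, which keeps
   the stack pointer 16-byte aligned as the architecture requires.  */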
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will be
     set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
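
/* With FP = SP + 16 at the end of the prologue, the saved slots sit at
   fixed offsets: [FP] holds the caller's FP, [FP + 8] the LR, [FP - 8]
   the value pointer and [FP - 16] the regs buffer, which is how
   aarch64_emit_epilogue and aarch64_emit_reg locate them below.  */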
/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it
     with 0.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
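
/* CMP sets the Z flag when x0 is zero, and CSET x0, EQ then writes 1
   when Z is set and 0 otherwise, computing the logical NOT in two
   instructions without any branch.  */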
/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}
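
/* SIZE comes from the agent expression's memory reference opcodes, so
   only the 1, 2, 4 and 8 byte widths used by those opcodes need to be
   handled here.  */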
/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
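
/* OFFSET_P and SIZE_P record where the placeholder NOP lives in the
   emitted stream; once the forward-branch target is known, the
   bytecode compiler patches that NOP with a real B instruction via
   aarch64_write_goto_address below.  */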
/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}
/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}
/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

       MOV x1, x0
       MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}
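
/* x0 doubles as the expression stack's top, so it is flushed to the
   real stack before the call clobbers it and popped back afterwards.  */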
/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 >= x1.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 > x1.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 <= x1.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_got".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 < x1.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};
/* Implementation of target ops method "emit_ops".  */

emit_ops *
aarch64_target::emit_ops ()
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of target ops method
   "get_min_fast_tracepoint_insn_len".  */

int
aarch64_target::get_min_fast_tracepoint_insn_len ()
{
  return 4;
}

/* Implementation of linux target ops method "low_supports_range_stepping".  */

bool
aarch64_target::low_supports_range_stepping ()
{
  return true;
}
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}
/* Returns true if memory tagging is supported.  */

bool
aarch64_target::supports_memory_tagging ()
{
  if (current_thread == NULL)
    {
      /* We don't have any processes running, so don't attempt to
         use linux_get_hwcap2 as it will try to fetch the current
         thread id.  Instead, just fetch the auxv from the self
         PID.  */
#ifdef HAVE_GETAUXVAL
      return (getauxval (AT_HWCAP2) & HWCAP2_MTE) != 0;
#else
      return true;
#endif
    }

  return (linux_get_hwcap2 (current_thread->id.pid (), 8) & HWCAP2_MTE) != 0;
}
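
/* Checking gdbserver's own auxv is a reasonable fallback here: HWCAP2
   describes the hardware and kernel combination, which is shared with
   any inferior this server could spawn.  */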
bool
aarch64_target::fetch_memtags (CORE_ADDR address, size_t len,
                               gdb::byte_vector &tags, int type)
{
  /* Allocation tags are per-process, so any tid is fine.  */
  int tid = lwpid_of (current_thread);

  /* Allocation tag?  */
  if (type == static_cast <int> (aarch64_memtag_type::mte_allocation))
    return aarch64_mte_fetch_memtags (tid, address, len, tags);

  return false;
}

bool
aarch64_target::store_memtags (CORE_ADDR address, size_t len,
                               const gdb::byte_vector &tags, int type)
{
  /* Allocation tags are per-process, so any tid is fine.  */
  int tid = lwpid_of (current_thread);

  /* Allocation tag?  */
  if (type == static_cast <int> (aarch64_memtag_type::mte_allocation))
    return aarch64_mte_store_memtags (tid, address, len, tags);

  return false;
}
/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
}