/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"

#include "tracepoint.h"

#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};
/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}
/* Return true if the regcache contains the number of SVE registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return regcache->tdesc->reg_defs.size () == AARCH64_SVE_NUM_REGS;
}
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}
/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */
extern int debug_threads;
/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}
/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}
#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      (*the_target->read_memory) (where, (unsigned char *) &insn,
				  aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}
static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}
/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}
/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode length bit
	     mask in hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}
/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret
      = aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				   state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode length bit
	     mask in hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}
/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what memory range was
	     watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
				   |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}
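/* Illustration (hypothetical numbers, not from the original source): for
   a 4-byte watchpoint on 0x1006, the hardware pair would be programmed as
   dr_addr_wp = 0x1000 with an offset of 6 encoded in dr_ctrl_wp, so
   ADDR_WATCH = 0x1000 + 6 = 0x1006 and ADDR_WATCH_ALIGNED = 0x1000.  An
   8-byte store covering 0x1000..0x1007 then reports ADDR_TRAP = 0x1000,
   which the aligned lower bound above still accepts as a match.  */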
/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}
/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}
/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}
/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}
/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}
/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before the 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)
/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = tdesc_arm_with_neon;

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}
/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}
static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };
static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };
/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}
/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}
/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}
/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};
/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };
/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}
/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}
/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
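/* Illustration (not part of the original source): expanding the NZCV
   entry above gives 0x4000 | 0x1800 | 0x200 | 0x10 | 0x0 = 0x5a10,
   packing the op0/op1/CRn/CRm/op2 fields named in the comment into one
   15-bit value.  emit_mrs and emit_msr below splice that value into the
   instruction word with ENCODE (system_reg, 15, 5), i.e. at bit 5.  */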
/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}
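/* Illustration (not part of the original source): the pair offset is a
   signed 7-bit field scaled by the access size, which is where the
   -512 .. 504 range comes from: imm7 spans -64 .. 63 and 64-bit pairs
   scale it by 8 (emit_load_store_pair encodes operand.index >> 3).
   The Q-register variants below scale by 16 instead, giving the
   -1024 .. 1008 range quoted there.  */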
/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}
/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}
/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}
/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}
/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}
/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates whether the store succeeded.
   RT is the register to store.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}
/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}
/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}
/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}
/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   IMM is the immediate to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}
/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}
/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}
/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     destination register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
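/* Illustration (hypothetical address, not from the original source): for
   ADDR == 0x0000aabbccdd1122 the function above emits only three
   instructions, because bits 63:48 are zero:

       MOV  xd, #0x1122
       MOVK xd, #0xccdd, lsl #16
       MOVK xd, #0xaabb, lsl #32

   so "p += emit_mov_addr (p, x0, 0xaabbccdd1122)" advances P by three
   instruction words.  */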
/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}
/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}
/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}
/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}
/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}
/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}
/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}
/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}
/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}
/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}
/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}
/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}
/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     use it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
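/* Illustration (not part of the original source): with the condition
   codes defined above, EQ is 0x0 and its inversion NE is 0x1, so
   "emit_cset (p, x0, EQ)" emits CSINC x0, xzr, xzr, NE (0x0 ^ 0x1),
   which leaves 1 in x0 exactly when the Z flag is set.  */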
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
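/* Illustration (not part of the original source): the BRK instruction
   word 0xd4200000 is stored in memory as the bytes 00 00 20 d4, which is
   why aarch64_breakpoint near the top of this file is spelled out in that
   order; on a big-endian host, htole32 above performs the equivalent byte
   swap for every emitted instruction.  */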
/* Sub-class of struct aarch64_insn_data, storing information about
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};
/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
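/* Illustration (hypothetical addresses, not from the original source):
   an instruction at 0x400000 with a branch offset of +0x100 targets
   0x400100.  Relocated to a jump pad at 0x500000, the new offset is
   0x400000 - 0x500000 + 0x100 = -0xfff00, so the rewritten branch
   still lands on 0x400100.  */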
/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:

	 */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:

	 */

      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	   B NOT_TAKEN         ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:

	 */

      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}
/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

     */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}
/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}
static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					  CORE_ADDR tpaddr,
					  CORE_ADDR collector,
					  CORE_ADDR lockaddr,
					  ULONGEST orig_size,
					  CORE_ADDR *jump_entry,
					  CORE_ADDR *trampoline,
					  ULONGEST *trampoline_size,
					  unsigned char *jjump_pad_insn,
					  ULONGEST *jjump_pad_insn_size,
					  CORE_ADDR *adjusted_insn_addr,
					  CORE_ADDR *adjusted_insn_addr_end,
					  char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16-byte aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  | q31                                                  |
	  .                                                      .
	  | q0                                                   |
	  *---- General purpose registers from 30 down to 0. ----*
	  | x30                                                  |
	  .                                                      .
	  | x0                                                   |
	  *------------- Special purpose registers. -------------*
	  | SP                                                   |
	  | PC                                                   |
	  | CPSR (NZCV)                                          | 5 cells
	  | FPSR                                                 |
	  | FPCR                                                 |
	  *------------- collecting_t object --------------------*
	  | TPIDR_EL0               | struct tracepoint *        |
     Low *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
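  /* Illustration (not part of the original source): the frame built
     below is (32 + 31 + 5 + 1) * 16 = 1104 bytes.  The collecting_t
     object occupies the lowest cell, which is why the collector is
     handed SP + 16 as the start of the saved registers.  */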
  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
  /* Save PC (tracepoint address):

       MOV  x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

     */
  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
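  /* Illustration (an interpretation, not original code): in C terms the
     eight instructions above implement roughly

	 do
	   wait_for_event ();
	 while (load_exclusive (lockaddr) != 0
		|| !store_exclusive (lockaddr, collecting_t_ptr));

     using raw exclusive-monitor instructions because the jump pad cannot
     call into libc; wait_for_event, load_exclusive and store_exclusive
     are hypothetical names standing in for WFE, LDAXR and STXR.  */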
  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);
  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data reads and writes are
       ; done before this instruction is executed.  Furthermore, this
       ; instruction will trigger an event, letting other threads know
       ; they can grab the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);
  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));
  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));
  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));
  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));
  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
	       "E.Could not relocate instruction from %s to %s.",
	       core_addr_to_string_nz (tpaddr),
	       core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    {
      append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
      *adjusted_insn_addr_end = buildaddr;
    }
  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);
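  /* The 28-bit limit checked here and below comes from the A64 B
     encoding: a signed 26-bit word offset, i.e. a 28-bit byte offset
     once scaled by the 4-byte instruction size.  Roughly:

       ok = offset >= -(1 << 27) && offset < (1 << 27);

     so the jump pad must sit within +/-128 MiB of the tracepoint.  */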
  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
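/* For reference, with rt = x0 the two helpers above assemble to:

     LDR x0, [sp], #16     ; pop:  load, then SP += 16 (post-index)
     STR x0, [sp, #-16]!   ; push: SP -= 16, then store (pre-index)

   Each value occupies a full 16-byte slot even for an 8-byte register,
   which keeps SP 16-byte aligned as the architecture expects for
   SP-based accesses.  */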
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will
     be set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
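/* For reference, the prologue above assembles, in effect, to:

     STP x0, x1, [sp, #-32]!
     STR x30, [sp, #24]         ; LR
     STR x29, [sp, #16]         ; FP
     ADD x29, sp, #16

   so FP - 8 addresses the saved x1 (value) and FP - 16 the saved x0
   (regs), however far the stack machine grows afterwards.  */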
/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
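/* aarch64_emit_add and the two-operand methods that follow all share
   one shape: pop the second operand into the scratch register x1,
   then combine it with x0, the cached top of stack.  For emit_add
   this is:

     LDR x1, [sp], #16
     ADD x0, x1, x0  */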
/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it
     with 0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
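/* The CMP/CSET pair is the A64 idiom for materializing a boolean:

     CMP x0, #0
     CSET x0, EQ     ; x0 = (x0 == 0) ? 1 : 0

   The comparison methods below reuse it with other conditions.  */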
/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}
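/* Every branch of the switch above zero-extends into the full 64-bit
   x0 (a load writing w0 clears bits 63:32), so the new top of stack
   is always a zero-extended value; e.g. for size 2:

     LDRH w0, [x0]  */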
/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
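/* For reference, the sequence above resolves to:

     CMP x0, #0
     LDR x0, [sp], #16    ; pop; loads do not touch the flags
     B.EQ +8              ; skip the goto if the old top was zero
     NOP                  ; patched into "B <target>" later

   write_goto_address rewrites the NOP once the goto target is
   known.  */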
/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}
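/* ip0 is x16, the first intra-procedure-call scratch register in the
   AAPCS64, so it may be clobbered around a call without disturbing the
   expression state held in x0/x1:

     MOV/MOVK x16, #(fn)   ; address materialized piecewise
     BLR x16  */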
/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}
/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}
/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

     MOV x1, x0
     MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}
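/* The push/pop pair around the call above preserves the top of stack:
   FN returns void but, per the AAPCS64, is free to clobber the
   caller-saved registers, x0 among them, so x0 is spilled before the
   call and reloaded afterwards.  */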
/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 >= x1.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 > x1.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 <= x1.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 < x1.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
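/* The six comparison gotos above follow the emit_if_goto protocol:
   pop the second operand into x1, compare it with x0, branch over a
   NOP on the inverted condition, and let write_goto_address patch the
   NOP into the actual goto once its target is known.  */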
static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_goto,
};
/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}
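/* A fast tracepoint is installed by overwriting a single 4-byte A64
   instruction with an unconditional B into the jump pad, hence the
   minimum instruction length of 4.  */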
/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
aarch64_sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}
/* Implementation of linux_target_ops method "breakpoint_kind_from_pc".  */

static int
aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}
/* Implementation of the linux_target_ops method
   "breakpoint_kind_from_current_state".  */

static int
aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}
/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
struct linux_target_ops the_low_target =
{
  aarch64_arch_setup,
  aarch64_regs_info,
  NULL, /* cannot_fetch_register */
  NULL, /* cannot_store_register */
  NULL, /* fetch_register */
  aarch64_get_pc,
  aarch64_set_pc,
  aarch64_breakpoint_kind_from_pc,
  aarch64_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  0,    /* decr_pc_after_break */
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_breakpoint_kind_from_current_state,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};
void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);

#if GDB_SELF_TEST
  initialize_low_tdesc ();
#endif
}