/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.

   Copyright (C) 2002-2024 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#include "arch/amd64-linux-tdesc.h"
#endif

#include "nat/i386-linux.h"

#include "arch/i386-linux-tdesc.h"
#include "arch/x86-linux-tdesc-features.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tracepoint.h"

#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "nat/x86-linux-tdesc.h"
#ifdef __x86_64__
static target_desc_up tdesc_amd64_linux_no_xml;
#endif
static target_desc_up tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char xmltarget_i386_linux_no_xml[] = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char xmltarget_amd64_linux_no_xml[] = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
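/* A note on the format above (our gloss, not original text): the
   leading '@' is gdbserver's convention marking the string as the
   literal target.xml document to hand to GDB, rather than the name of
   an XML file to look up; see the xmltarget handling in gdbserver's
   target-description code.  */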
#include <sys/procfs.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */
class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (gdb::array_view<const char * const> features)
    override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  int get_ipa_tdesc_idx () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;

private:

  /* Update the target description of all processes; a new GDB has
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();
};

/* The singleton target ops object.  */

static x86_target the_x86_target;
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* st0 ... st7 */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* fctrl ... fop */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm0 ... xmm15 */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,					/* mxcsr */
  ORIG_RAX * 8,
  21 * 8,  22 * 8,			/* fs_base, gs_base */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm0 ... ymm15 */
  -1, -1, -1, -1, -1, -1, -1, -1,
  /* MPX is deprecated.  Yet we keep this to not give the registers below
     a new number.  That could break older gdbs.  */
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512) */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512) */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512) */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512) */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1					/* pkru */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
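/* An illustrative note (ours, not from the original sources): GDB
   register 0 on amd64 is %rax, so x86_64_regmap[0] is RAX * 8, the
   byte offset of that register within the gregset buffer exchanged
   with ptrace by x86_fill_gregset/x86_store_gregset below; a -1 entry
   means the register is not transferred through the gregset.  */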
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
/* Returns true if THREAD belongs to a x86-64 process, per the tdesc.  */

static int
is_64bit_tdesc (thread_info *thread)
{
  return register_size (thread->process ()->tdesc, 0) == 8;
}
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
  gdb_assert (lwp != nullptr);
  int use_64bit = is_64bit_tdesc (lwp->thread);

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
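/* A layout note (ours, not original text): PTRACE_GET_THREAD_AREA
   fills in a struct user_desc, whose second member is base_addr;
   viewing the result as unsigned int[4] therefore makes desc[1] the
   segment base address that callers want.  */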
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
  lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
  gdb_assert (lwp != nullptr);
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc (lwp->thread);

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    thread_info *thr = lwp->thread;
    regcache *regcache = get_thread_regcache (thr);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return false;
#endif

  return regno >= I386_NUM_REGS;
}
static void
collect_register_i386 (struct regcache *regcache, int regno, void *buf)
{
  collect_register (regcache, regno, buf);

#ifdef __x86_64__
  /* In case of x86_64 -m32, collect_register only writes 4 bytes, but the
     space reserved in buf for the register is 8 bytes.  Make sure the entire
     reserved space is initialized.  */

  gdb_assert (register_size (regcache->tdesc, regno) == 4);

  if (regno == find_regno (regcache->tdesc, "orig_eax"))
    {
      /* Sign extend EAX value to avoid potential syscall restart
	 problems.

	 See amd64_linux_collect_native_gregset() in
	 gdb/amd64-linux-nat.c for a detailed explanation.  */
      *(int64_t *) buf = *(int32_t *) buf;
    }
  else
    *(uint64_t *) buf = *(uint32_t *) buf;
#endif
}
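/* A worked example (ours): for a 32-bit inferior on a 64-bit host, if
   "orig_eax" holds 0xfffffffe (-2, a syscall-restart value), the sign
   extension above stores 0xfffffffffffffffe in the 8-byte slot; plain
   zero extension would store 0x00000000fffffffe instead and confuse
   the kernel's syscall-restart logic.  */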
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register_i386 (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* Handle ORIG_EAX, which is not in i386_regmap.  */
  collect_register_i386 (regcache, find_regno (regcache->tdesc, "orig_eax"),
			 ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
#ifndef __x86_64__
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
#endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
bool
x86_target::low_supports_breakpoints ()
{
  return true;
}
CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}
void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}
int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}

static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}
bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}
CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}
/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}
void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}
/* Target routine for new_fork.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}
/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = current_thread->id.lwp ();
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc (current_thread))
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description ()
{
  int tid = current_thread->id.lwp ();

  /* If we are not allowed to send an XML target description then we need
     to use the hard-wired target descriptions.  This corresponds to GDB's
     default machine for x86.

     This check needs to occur before any return statements that might
     generate some alternative target descriptions.  */
  if (!use_xml)
    {
      x86_linux_arch_size arch_size = x86_linux_ptrace_get_arch_size (tid);
      bool is_64bit = arch_size.is_64bit ();
      bool is_x32 = arch_size.is_x32 ();

      if (sizeof (void *) == 4 && is_64bit && !is_x32)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));

#ifdef __x86_64__
      if (is_64bit && !is_x32)
	return tdesc_amd64_linux_no_xml.get ();
      else
#endif
	return tdesc_i386_linux_no_xml.get ();
    }

  /* If have_ptrace_getregset is changed to true by calling
     x86_linux_tdesc_for_tid then we will perform some additional
     initialization below.  */
  bool have_ptrace_getregset_was_unknown
    = have_ptrace_getregset == TRIBOOL_UNKNOWN;

  /* Get pointers to where we should store the xcr0 and xsave_layout
     values.  These will be filled in by x86_linux_tdesc_for_tid the first
     time that the function is called.  Subsequent calls will not modify
     the stored values.  */
  std::pair<uint64_t *, x86_xsave_layout *> storage
    = i387_get_xsave_storage ();

  const target_desc *tdesc
    = x86_linux_tdesc_for_tid (tid, storage.first, storage.second);

  if (have_ptrace_getregset_was_unknown
      && have_ptrace_getregset == TRIBOOL_TRUE)
    {
      int xsave_len = x86_xsave_length ();

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset_info *regset = x86_regsets;
	   regset->fill_function != nullptr;
	   regset++)
	{
	  if (regset->get_request == PTRACE_GETREGSET)
	    regset->size = xsave_len;
	  else if (regset->type != GENERAL_REGS)
	    regset->size = 0;
	}
    }

  return tdesc;
}
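/* A usage note (ours): once an XML-capable GDB triggers the branch
   above, the xstate regset's size is patched to the kernel's actual
   xsave buffer length, and every other non-general regset is disabled
   by setting its size to 0, so the generic regsets code skips the
   legacy FP fetch paths from then on.  */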
/* Update the target description of all processes; a new GDB has
   connected, and it may or may not support xml target descriptions.  */

void
x86_target::update_xmltarget ()
{
  scoped_restore_current_thread restore_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    switch_to_thread (find_any_thread_of_pid (pid));

    low_arch_setup ();
  });
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

void
x86_target::process_qsupported (gdb::array_view<const char * const> features)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;

  for (const char *feature : features)
    {
      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }

  update_xmltarget ();
}
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}
bool
x86_target::low_supports_catch_syscall ()
{
  return true;
}
/* Fill *SYSNO with the syscall number trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

void
x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
bool
x86_target::supports_tracepoints ()
{
  return true;
}
static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
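/* An illustrative call (ours): push_opcode (buf, "48 83 ec 18") parses
   the hex bytes 0x48 0x83 0xec 0x18 ("sub $0x18,%rsp") into BUF and
   returns 4, so callers can chain i += push_opcode (&buf[i], ...) to
   assemble an instruction sequence.  */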
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);
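  /* For reference (our sketch, not part of the original sources): the
     five instructions above implement roughly

       while (__sync_val_compare_and_swap (lock, 0, obj) != 0)
	 continue;   (spin until this thread owns the collection lock)

     where LOCK is the word at LOCKADDR and OBJ is the address of the
     collecting_t object just built on the stack, i.e. %rsp.  */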
  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   in bytes.  */

int
x86_target::get_min_fast_tracepoint_insn_len ()
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc (current_thread))
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = 0;

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  threads_debug_printf ("Adding %d bytes of insn at %s",
			len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
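/* A worked example (ours): amd64_emit_const (0x1122334455667788)
   appends the ten bytes 48 b8 88 77 66 55 44 33 22 11, i.e.
   "movabs $0x1122334455667788,%rax" with the immediate stored
   little-endian after the two opcode bytes.  */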
static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
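/* A worked example (ours): for num = 0x0000000100000002,
   i386_emit_const emits "mov $0x2,%eax" then "mov $0x1,%ebx", since a
   64-bit bytecode value travels in the %ebx:%eax pair; for num = 7
   the high half is zero and the shorter "xor %ebx,%ebx" is used
   instead.  */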
static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
emit_ops *
x86_target::emit_ops ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
bool
x86_target::low_supports_range_stepping ()
{
  return true;
}
int
x86_target::get_ipa_tdesc_idx ()
{
  const target_desc *tdesc = current_process ()->tdesc;

  if (!use_xml)
    {
      /* If USE_XML is false then we should be using one of these target
	 descriptions, see x86_linux_read_description for where we choose
	 one of these.  Both of these descriptions are created from this
	 fixed xcr0 value X86_XSTATE_SSE_MASK.  */
      gdb_assert (tdesc == tdesc_i386_linux_no_xml.get ()
#ifdef __x86_64__
		  || tdesc == tdesc_amd64_linux_no_xml.get ()
#endif /* __x86_64__ */
		  );
      return x86_linux_xcr0_to_tdesc_idx (X86_XSTATE_SSE_MASK);
    }

  /* The xcr0 value and xsave layout value are cached when the target
     description is read.  Grab their cache location, and use the cached
     value to calculate a tdesc index.  */
  std::pair<uint64_t *, x86_xsave_layout *> storage
    = i387_get_xsave_storage ();
  uint64_t xcr0 = *storage.first;

  return x86_linux_xcr0_to_tdesc_idx (xcr0);
}
/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml.get (),
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml.get (),
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}