1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2018 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "linux-low.h"
27 #include "x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
31 #include "nat/amd64-linux-siginfo.h"
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
38 #include "elf/common.h"
43 #include "tracepoint.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
#ifdef __x86_64__
/* Fallback target description used when GDB has no XML support
   (64-bit).  Built lazily from XMLTARGET_AMD64_LINUX_NO_XML below.  */
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
/* Fallback target description used when GDB has no XML support
   (32-bit).  */
static struct target_desc *tdesc_i386_linux_no_xml;

/* A 5-byte 32-bit-relative `jmp rel32' instruction, patched in place
   by the fast-tracepoint code below.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
/* A 4-byte 16-bit-relative `jmp rel16' (0x66 operand-size prefix),
   used when only 4 bytes are available at the tracepoint.  */
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  These literal
   target descriptions are handed to older GDBs that do not send
   "xmlRegisters=" in qSupported.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>@";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>@";
#endif
74 #include <sys/procfs.h>
75 #include "nat/gdb_ptrace.h"
/* ptrace request used to read the GDT entry of a thread; may be
   missing from older <sys/ptrace.h>.  */
#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
96 /* Per-process arch-specific data we want to keep. */
98 struct arch_process_info
100 struct x86_debug_reg_state debug_reg_state
;
105 /* Mapping between the general-purpose registers in `struct user'
106 format and GDB's register array layout.
107 Note that the transfer layout uses 64-bit regs. */
108 static /*const*/ int i386_regmap
[] =
110 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
111 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
112 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
113 DS
* 8, ES
* 8, FS
* 8, GS
* 8
116 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
118 /* So code below doesn't have to care, i386 or amd64. */
119 #define ORIG_EAX ORIG_RAX
122 static const int x86_64_regmap
[] =
124 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
125 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
126 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
127 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
128 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
129 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
130 -1, -1, -1, -1, -1, -1, -1, -1,
131 -1, -1, -1, -1, -1, -1, -1, -1,
132 -1, -1, -1, -1, -1, -1, -1, -1,
134 -1, -1, -1, -1, -1, -1, -1, -1,
136 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
141 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
142 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
143 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
144 -1, -1, -1, -1, -1, -1, -1, -1,
145 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
146 -1, -1, -1, -1, -1, -1, -1, -1,
147 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
148 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
149 -1, -1, -1, -1, -1, -1, -1, -1,
150 -1, -1, -1, -1, -1, -1, -1, -1,
151 -1, -1, -1, -1, -1, -1, -1, -1,
155 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
156 #define X86_64_USER_REGS (GS + 1)
158 #else /* ! __x86_64__ */
160 /* Mapping between the general-purpose registers in `struct user'
161 format and GDB's register array layout. */
162 static /*const*/ int i386_regmap
[] =
164 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
165 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
166 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
167 DS
* 4, ES
* 4, FS
* 4, GS
* 4
170 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
178 /* Returns true if the current inferior belongs to a x86-64 process,
182 is_64bit_tdesc (void)
184 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
186 return register_size (regcache
->tdesc
, 0) == 8;
192 /* Called by libthread_db. */
195 ps_get_thread_area (struct ps_prochandle
*ph
,
196 lwpid_t lwpid
, int idx
, void **base
)
199 int use_64bit
= is_64bit_tdesc ();
206 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
210 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
221 unsigned int desc
[4];
223 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
224 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
227 /* Ensure we properly extend the value to 64-bits for x86_64. */
228 *base
= (void *) (uintptr_t) desc
[1];
233 /* Get the thread area address. This is used to recognize which
234 thread is which when tracing with the in-process agent library. We
235 don't read anything from the address, and treat it as opaque; it's
236 the address itself that we assume is unique per-thread. */
239 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
242 int use_64bit
= is_64bit_tdesc ();
247 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
249 *addr
= (CORE_ADDR
) (uintptr_t) base
;
258 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
259 struct thread_info
*thr
= get_lwp_thread (lwp
);
260 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
261 unsigned int desc
[4];
263 const int reg_thread_area
= 3; /* bits to scale down register value. */
266 collect_register_by_name (regcache
, "gs", &gs
);
268 idx
= gs
>> reg_thread_area
;
270 if (ptrace (PTRACE_GET_THREAD_AREA
,
272 (void *) (long) idx
, (unsigned long) &desc
) < 0)
283 x86_cannot_store_register (int regno
)
286 if (is_64bit_tdesc ())
290 return regno
>= I386_NUM_REGS
;
294 x86_cannot_fetch_register (int regno
)
297 if (is_64bit_tdesc ())
301 return regno
>= I386_NUM_REGS
;
305 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
310 if (register_size (regcache
->tdesc
, 0) == 8)
312 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
313 if (x86_64_regmap
[i
] != -1)
314 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
316 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
319 int lwpid
= lwpid_of (current_thread
);
321 collect_register_by_name (regcache
, "fs_base", &base
);
322 ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_SET_FS
);
324 collect_register_by_name (regcache
, "gs_base", &base
);
325 ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_SET_GS
);
332 /* 32-bit inferior registers need to be zero-extended.
333 Callers would read uninitialized memory otherwise. */
334 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
337 for (i
= 0; i
< I386_NUM_REGS
; i
++)
338 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
340 collect_register_by_name (regcache
, "orig_eax",
341 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
345 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
350 if (register_size (regcache
->tdesc
, 0) == 8)
352 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
353 if (x86_64_regmap
[i
] != -1)
354 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
356 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
359 int lwpid
= lwpid_of (current_thread
);
361 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
362 supply_register_by_name (regcache
, "fs_base", &base
);
364 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_GS
) == 0)
365 supply_register_by_name (regcache
, "gs_base", &base
);
372 for (i
= 0; i
< I386_NUM_REGS
; i
++)
373 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
375 supply_register_by_name (regcache
, "orig_eax",
376 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
380 x86_fill_fpregset (struct regcache
*regcache
, void *buf
)
383 i387_cache_to_fxsave (regcache
, buf
);
385 i387_cache_to_fsave (regcache
, buf
);
390 x86_store_fpregset (struct regcache
*regcache
, const void *buf
)
393 i387_fxsave_to_cache (regcache
, buf
);
395 i387_fsave_to_cache (regcache
, buf
);
#ifndef __x86_64__

/* Copy FP/SSE registers from REGCACHE into BUF in fxsave layout
   (32-bit hosts with PTRACE_GETFPXREGS only).  */
static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

/* Copy FP/SSE registers from BUF (fxsave layout) into REGCACHE.  */
static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
416 x86_fill_xstateregset (struct regcache
*regcache
, void *buf
)
418 i387_cache_to_xsave (regcache
, buf
);
422 x86_store_xstateregset (struct regcache
*regcache
, const void *buf
)
424 i387_xsave_to_cache (regcache
, buf
);
427 /* ??? The non-biarch i386 case stores all the i387 regs twice.
428 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
429 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
430 doesn't work. IWBN to avoid the duplication in the case where it
431 does work. Maybe the arch_setup routine could check whether it works
432 and update the supported regsets accordingly. */
434 static struct regset_info x86_regsets
[] =
436 #ifdef HAVE_PTRACE_GETREGS
437 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
439 x86_fill_gregset
, x86_store_gregset
},
440 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
441 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
443 # ifdef HAVE_PTRACE_GETFPXREGS
444 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
446 x86_fill_fpxregset
, x86_store_fpxregset
},
449 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
451 x86_fill_fpregset
, x86_store_fpregset
},
452 #endif /* HAVE_PTRACE_GETREGS */
457 x86_get_pc (struct regcache
*regcache
)
459 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
465 collect_register_by_name (regcache
, "rip", &pc
);
466 return (CORE_ADDR
) pc
;
472 collect_register_by_name (regcache
, "eip", &pc
);
473 return (CORE_ADDR
) pc
;
478 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
480 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
486 supply_register_by_name (regcache
, "rip", &newpc
);
492 supply_register_by_name (regcache
, "eip", &newpc
);
496 static const gdb_byte x86_breakpoint
[] = { 0xCC };
497 #define x86_breakpoint_len 1
500 x86_breakpoint_at (CORE_ADDR pc
)
504 (*the_target
->read_memory
) (pc
, &c
, 1);
511 /* Low-level function vector. */
512 struct x86_dr_low_type x86_dr_low
=
514 x86_linux_dr_set_control
,
515 x86_linux_dr_set_addr
,
516 x86_linux_dr_get_addr
,
517 x86_linux_dr_get_status
,
518 x86_linux_dr_get_control
,
522 /* Breakpoint/Watchpoint support. */
525 x86_supports_z_point_type (char z_type
)
531 case Z_PACKET_WRITE_WP
:
532 case Z_PACKET_ACCESS_WP
:
540 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
541 int size
, struct raw_breakpoint
*bp
)
543 struct process_info
*proc
= current_process ();
547 case raw_bkpt_type_hw
:
548 case raw_bkpt_type_write_wp
:
549 case raw_bkpt_type_access_wp
:
551 enum target_hw_bp_type hw_type
552 = raw_bkpt_type_to_target_hw_bp_type (type
);
553 struct x86_debug_reg_state
*state
554 = &proc
->priv
->arch_private
->debug_reg_state
;
556 return x86_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
566 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
567 int size
, struct raw_breakpoint
*bp
)
569 struct process_info
*proc
= current_process ();
573 case raw_bkpt_type_hw
:
574 case raw_bkpt_type_write_wp
:
575 case raw_bkpt_type_access_wp
:
577 enum target_hw_bp_type hw_type
578 = raw_bkpt_type_to_target_hw_bp_type (type
);
579 struct x86_debug_reg_state
*state
580 = &proc
->priv
->arch_private
->debug_reg_state
;
582 return x86_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
591 x86_stopped_by_watchpoint (void)
593 struct process_info
*proc
= current_process ();
594 return x86_dr_stopped_by_watchpoint (&proc
->priv
->arch_private
->debug_reg_state
);
598 x86_stopped_data_address (void)
600 struct process_info
*proc
= current_process ();
602 if (x86_dr_stopped_data_address (&proc
->priv
->arch_private
->debug_reg_state
,
608 /* Called when a new process is created. */
610 static struct arch_process_info
*
611 x86_linux_new_process (void)
613 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
615 x86_low_init_dregs (&info
->debug_reg_state
);
620 /* Called when a process is being deleted. */
623 x86_linux_delete_process (struct arch_process_info
*info
)
628 /* Target routine for linux_new_fork. */
631 x86_linux_new_fork (struct process_info
*parent
, struct process_info
*child
)
633 /* These are allocated by linux_add_process. */
634 gdb_assert (parent
->priv
!= NULL
635 && parent
->priv
->arch_private
!= NULL
);
636 gdb_assert (child
->priv
!= NULL
637 && child
->priv
->arch_private
!= NULL
);
639 /* Linux kernel before 2.6.33 commit
640 72f674d203cd230426437cdcf7dd6f681dad8b0d
641 will inherit hardware debug registers from parent
642 on fork/vfork/clone. Newer Linux kernels create such tasks with
643 zeroed debug registers.
645 GDB core assumes the child inherits the watchpoints/hw
646 breakpoints of the parent, and will remove them all from the
647 forked off process. Copy the debug registers mirrors into the
648 new process so that all breakpoints and watchpoints can be
649 removed together. The debug registers mirror will become zeroed
650 in the end before detaching the forked off process, thus making
651 this compatible with older Linux kernels too. */
653 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
656 /* See nat/x86-dregs.h. */
658 struct x86_debug_reg_state
*
659 x86_debug_reg_state (pid_t pid
)
661 struct process_info
*proc
= find_process_pid (pid
);
663 return &proc
->priv
->arch_private
->debug_reg_state
;
666 /* When GDBSERVER is built as a 64-bit application on linux, the
667 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
668 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
669 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
670 conversion in-place ourselves. */
672 /* Convert a ptrace/host siginfo object, into/from the siginfo in the
673 layout of the inferiors' architecture. Returns true if any
674 conversion was done; false otherwise. If DIRECTION is 1, then copy
675 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
679 x86_siginfo_fixup (siginfo_t
*ptrace
, gdb_byte
*inf
, int direction
)
682 unsigned int machine
;
683 int tid
= lwpid_of (current_thread
);
684 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
686 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
687 if (!is_64bit_tdesc ())
688 return amd64_linux_siginfo_fixup_common (ptrace
, inf
, direction
,
690 /* No fixup for native x32 GDB. */
691 else if (!is_elf64
&& sizeof (void *) == 8)
692 return amd64_linux_siginfo_fixup_common (ptrace
, inf
, direction
,
/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   -1 means unknown (probed lazily); 0/1 once determined.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
733 /* Get Linux/x86 target description from running target. */
735 static const struct target_desc
*
736 x86_linux_read_description (void)
738 unsigned int machine
;
742 static uint64_t xcr0
;
743 struct regset_info
*regset
;
745 tid
= lwpid_of (current_thread
);
747 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
749 if (sizeof (void *) == 4)
752 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
754 else if (machine
== EM_X86_64
)
755 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
759 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
760 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
762 elf_fpxregset_t fpxregs
;
764 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
766 have_ptrace_getfpxregs
= 0;
767 have_ptrace_getregset
= 0;
768 return i386_linux_read_description (X86_XSTATE_X87
);
771 have_ptrace_getfpxregs
= 1;
777 x86_xcr0
= X86_XSTATE_SSE_MASK
;
781 if (machine
== EM_X86_64
)
782 return tdesc_amd64_linux_no_xml
;
785 return tdesc_i386_linux_no_xml
;
788 if (have_ptrace_getregset
== -1)
790 uint64_t xstateregs
[(X86_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
793 iov
.iov_base
= xstateregs
;
794 iov
.iov_len
= sizeof (xstateregs
);
796 /* Check if PTRACE_GETREGSET works. */
797 if (ptrace (PTRACE_GETREGSET
, tid
,
798 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
799 have_ptrace_getregset
= 0;
802 have_ptrace_getregset
= 1;
804 /* Get XCR0 from XSAVE extended state. */
805 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
806 / sizeof (uint64_t))];
808 /* Use PTRACE_GETREGSET if it is available. */
809 for (regset
= x86_regsets
;
810 regset
->fill_function
!= NULL
; regset
++)
811 if (regset
->get_request
== PTRACE_GETREGSET
)
812 regset
->size
= X86_XSTATE_SIZE (xcr0
);
813 else if (regset
->type
!= GENERAL_REGS
)
818 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
819 xcr0_features
= (have_ptrace_getregset
820 && (xcr0
& X86_XSTATE_ALL_MASK
));
825 if (machine
== EM_X86_64
)
828 const target_desc
*tdesc
= NULL
;
832 tdesc
= amd64_linux_read_description (xcr0
& X86_XSTATE_ALL_MASK
,
837 tdesc
= amd64_linux_read_description (X86_XSTATE_SSE_MASK
, !is_elf64
);
843 const target_desc
*tdesc
= NULL
;
846 tdesc
= i386_linux_read_description (xcr0
& X86_XSTATE_ALL_MASK
);
849 tdesc
= i386_linux_read_description (X86_XSTATE_SSE
);
854 gdb_assert_not_reached ("failed to return tdesc");
857 /* Update all the target description of all processes; a new GDB
858 connected, and it may or not support xml target descriptions. */
861 x86_linux_update_xmltarget (void)
863 struct thread_info
*saved_thread
= current_thread
;
865 /* Before changing the register cache's internal layout, flush the
866 contents of the current valid caches back to the threads, and
867 release the current regcache objects. */
870 for_each_process ([] (process_info
*proc
) {
873 /* Look up any thread of this process. */
874 current_thread
= find_any_thread_of_pid (pid
);
876 the_low_target
.arch_setup ();
879 current_thread
= saved_thread
;
882 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
886 x86_linux_process_qsupported (char **features
, int count
)
890 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
891 with "i386" in qSupported query, it supports x86 XML target
894 for (i
= 0; i
< count
; i
++)
896 const char *feature
= features
[i
];
898 if (startswith (feature
, "xmlRegisters="))
900 char *copy
= xstrdup (feature
+ 13);
903 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
905 if (strcmp (p
, "i386") == 0)
915 x86_linux_update_xmltarget ();
918 /* Common for x86/x86-64. */
920 static struct regsets_info x86_regsets_info
=
922 x86_regsets
, /* regsets */
924 NULL
, /* disabled_regsets */
928 static struct regs_info amd64_linux_regs_info
=
930 NULL
, /* regset_bitmap */
931 NULL
, /* usrregs_info */
935 static struct usrregs_info i386_linux_usrregs_info
=
941 static struct regs_info i386_linux_regs_info
=
943 NULL
, /* regset_bitmap */
944 &i386_linux_usrregs_info
,
948 const struct regs_info
*
949 x86_linux_regs_info (void)
952 if (is_64bit_tdesc ())
953 return &amd64_linux_regs_info
;
956 return &i386_linux_regs_info
;
959 /* Initialize the target description for the architecture of the
963 x86_arch_setup (void)
965 current_process ()->tdesc
= x86_linux_read_description ();
968 /* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
969 code. This should only be called if LWP got a SYSCALL_SIGTRAP. */
972 x86_get_syscall_trapinfo (struct regcache
*regcache
, int *sysno
)
974 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
980 collect_register_by_name (regcache
, "orig_rax", &l_sysno
);
981 *sysno
= (int) l_sysno
;
984 collect_register_by_name (regcache
, "orig_eax", sysno
);
/* This target supports tracepoints.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
994 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
996 write_inferior_memory (*to
, buf
, len
);
/* Decode the string OP of space-separated hex byte values into BUF.
   Returns the number of bytes written.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* Stop when no more hex digits could be consumed.  */
      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1022 /* Build a jump pad that saves registers and calls a collection
1023 function. Writes a jump instruction to the jump pad to
1024 JJUMPAD_INSN. The caller is responsible to write it in at the
1025 tracepoint address. */
1028 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1029 CORE_ADDR collector
,
1032 CORE_ADDR
*jump_entry
,
1033 CORE_ADDR
*trampoline
,
1034 ULONGEST
*trampoline_size
,
1035 unsigned char *jjump_pad_insn
,
1036 ULONGEST
*jjump_pad_insn_size
,
1037 CORE_ADDR
*adjusted_insn_addr
,
1038 CORE_ADDR
*adjusted_insn_addr_end
,
1041 unsigned char buf
[40];
1045 CORE_ADDR buildaddr
= *jump_entry
;
1047 /* Build the jump pad. */
1049 /* First, do tracepoint data collection. Save registers. */
1051 /* Need to ensure stack pointer saved first. */
1052 buf
[i
++] = 0x54; /* push %rsp */
1053 buf
[i
++] = 0x55; /* push %rbp */
1054 buf
[i
++] = 0x57; /* push %rdi */
1055 buf
[i
++] = 0x56; /* push %rsi */
1056 buf
[i
++] = 0x52; /* push %rdx */
1057 buf
[i
++] = 0x51; /* push %rcx */
1058 buf
[i
++] = 0x53; /* push %rbx */
1059 buf
[i
++] = 0x50; /* push %rax */
1060 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1061 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1062 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1063 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1064 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1065 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1066 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1067 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1068 buf
[i
++] = 0x9c; /* pushfq */
1069 buf
[i
++] = 0x48; /* movabs <addr>,%rdi */
1071 memcpy (buf
+ i
, &tpaddr
, 8);
1073 buf
[i
++] = 0x57; /* push %rdi */
1074 append_insns (&buildaddr
, i
, buf
);
1076 /* Stack space for the collecting_t object. */
1078 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1079 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1080 memcpy (buf
+ i
, &tpoint
, 8);
1082 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1083 i
+= push_opcode (&buf
[i
],
1084 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1085 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1086 append_insns (&buildaddr
, i
, buf
);
1090 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1091 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1093 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1094 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1095 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1096 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1097 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1098 append_insns (&buildaddr
, i
, buf
);
1100 /* Set up the gdb_collect call. */
1101 /* At this point, (stack pointer + 0x18) is the base of our saved
1105 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1106 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1108 /* tpoint address may be 64-bit wide. */
1109 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1110 memcpy (buf
+ i
, &tpoint
, 8);
1112 append_insns (&buildaddr
, i
, buf
);
1114 /* The collector function being in the shared library, may be
1115 >31-bits away off the jump pad. */
1117 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1118 memcpy (buf
+ i
, &collector
, 8);
1120 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1121 append_insns (&buildaddr
, i
, buf
);
1123 /* Clear the spin-lock. */
1125 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1126 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1127 memcpy (buf
+ i
, &lockaddr
, 8);
1129 append_insns (&buildaddr
, i
, buf
);
1131 /* Remove stack that had been used for the collect_t object. */
1133 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1134 append_insns (&buildaddr
, i
, buf
);
1136 /* Restore register state. */
1138 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1142 buf
[i
++] = 0x9d; /* popfq */
1143 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1144 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1145 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1146 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1147 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1148 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1149 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1150 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1151 buf
[i
++] = 0x58; /* pop %rax */
1152 buf
[i
++] = 0x5b; /* pop %rbx */
1153 buf
[i
++] = 0x59; /* pop %rcx */
1154 buf
[i
++] = 0x5a; /* pop %rdx */
1155 buf
[i
++] = 0x5e; /* pop %rsi */
1156 buf
[i
++] = 0x5f; /* pop %rdi */
1157 buf
[i
++] = 0x5d; /* pop %rbp */
1158 buf
[i
++] = 0x5c; /* pop %rsp */
1159 append_insns (&buildaddr
, i
, buf
);
1161 /* Now, adjust the original instruction to execute in the jump
1163 *adjusted_insn_addr
= buildaddr
;
1164 relocate_instruction (&buildaddr
, tpaddr
);
1165 *adjusted_insn_addr_end
= buildaddr
;
1167 /* Finally, write a jump back to the program. */
1169 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1170 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1173 "E.Jump back from jump pad too far from tracepoint "
1174 "(offset 0x%" PRIx64
" > int32).", loffset
);
1178 offset
= (int) loffset
;
1179 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1180 memcpy (buf
+ 1, &offset
, 4);
1181 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1183 /* The jump pad is now built. Wire in a jump to our jump pad. This
1184 is always done last (by our caller actually), so that we can
1185 install fast tracepoints with threads running. This relies on
1186 the agent's atomic write support. */
1187 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1188 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1191 "E.Jump pad too far from tracepoint "
1192 "(offset 0x%" PRIx64
" > int32).", loffset
);
1196 offset
= (int) loffset
;
1198 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1199 memcpy (buf
+ 1, &offset
, 4);
1200 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1201 *jjump_pad_insn_size
= sizeof (jump_insn
);
1203 /* Return the end address of our pad. */
1204 *jump_entry
= buildaddr
;
1209 #endif /* __x86_64__ */
1211 /* Build a jump pad that saves registers and calls a collection
1212 function. Writes a jump instruction to the jump pad to
1213 JJUMPAD_INSN. The caller is responsible to write it in at the
1214 tracepoint address. */
1217 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1218 CORE_ADDR collector
,
1221 CORE_ADDR
*jump_entry
,
1222 CORE_ADDR
*trampoline
,
1223 ULONGEST
*trampoline_size
,
1224 unsigned char *jjump_pad_insn
,
1225 ULONGEST
*jjump_pad_insn_size
,
1226 CORE_ADDR
*adjusted_insn_addr
,
1227 CORE_ADDR
*adjusted_insn_addr_end
,
1230 unsigned char buf
[0x100];
1232 CORE_ADDR buildaddr
= *jump_entry
;
1234 /* Build the jump pad. */
1236 /* First, do tracepoint data collection. Save registers. */
1238 buf
[i
++] = 0x60; /* pushad */
1239 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1240 *((int *)(buf
+ i
)) = (int) tpaddr
;
1242 buf
[i
++] = 0x9c; /* pushf */
1243 buf
[i
++] = 0x1e; /* push %ds */
1244 buf
[i
++] = 0x06; /* push %es */
1245 buf
[i
++] = 0x0f; /* push %fs */
1247 buf
[i
++] = 0x0f; /* push %gs */
1249 buf
[i
++] = 0x16; /* push %ss */
1250 buf
[i
++] = 0x0e; /* push %cs */
1251 append_insns (&buildaddr
, i
, buf
);
1253 /* Stack space for the collecting_t object. */
1255 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1257 /* Build the object. */
1258 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1259 memcpy (buf
+ i
, &tpoint
, 4);
1261 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1263 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1264 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1265 append_insns (&buildaddr
, i
, buf
);
1267 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1268 If we cared for it, this could be using xchg alternatively. */
1271 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1272 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1274 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1276 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1277 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1278 append_insns (&buildaddr
, i
, buf
);
1281 /* Set up arguments to the gdb_collect call. */
1283 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1284 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1285 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1286 append_insns (&buildaddr
, i
, buf
);
1289 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1290 append_insns (&buildaddr
, i
, buf
);
1293 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1294 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1296 append_insns (&buildaddr
, i
, buf
);
1298 buf
[0] = 0xe8; /* call <reladdr> */
1299 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1300 memcpy (buf
+ 1, &offset
, 4);
1301 append_insns (&buildaddr
, 5, buf
);
1302 /* Clean up after the call. */
1303 buf
[0] = 0x83; /* add $0x8,%esp */
1306 append_insns (&buildaddr
, 3, buf
);
1309 /* Clear the spin-lock. This would need the LOCK prefix on older
1312 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1313 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1314 memcpy (buf
+ i
, &lockaddr
, 4);
1316 append_insns (&buildaddr
, i
, buf
);
1319 /* Remove stack that had been used for the collect_t object. */
1321 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1322 append_insns (&buildaddr
, i
, buf
);
1325 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1328 buf
[i
++] = 0x17; /* pop %ss */
1329 buf
[i
++] = 0x0f; /* pop %gs */
1331 buf
[i
++] = 0x0f; /* pop %fs */
1333 buf
[i
++] = 0x07; /* pop %es */
1334 buf
[i
++] = 0x1f; /* pop %ds */
1335 buf
[i
++] = 0x9d; /* popf */
1336 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1339 buf
[i
++] = 0x61; /* popad */
1340 append_insns (&buildaddr
, i
, buf
);
1342 /* Now, adjust the original instruction to execute in the jump
1344 *adjusted_insn_addr
= buildaddr
;
1345 relocate_instruction (&buildaddr
, tpaddr
);
1346 *adjusted_insn_addr_end
= buildaddr
;
1348 /* Write the jump back to the program. */
1349 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1350 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1351 memcpy (buf
+ 1, &offset
, 4);
1352 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1354 /* The jump pad is now built. Wire in a jump to our jump pad. This
1355 is always done last (by our caller actually), so that we can
1356 install fast tracepoints with threads running. This relies on
1357 the agent's atomic write support. */
1360 /* Create a trampoline. */
1361 *trampoline_size
= sizeof (jump_insn
);
1362 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1364 /* No trampoline space available. */
1366 "E.Cannot allocate trampoline space needed for fast "
1367 "tracepoints on 4-byte instructions.");
1371 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1372 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1373 memcpy (buf
+ 1, &offset
, 4);
1374 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1376 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1377 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1378 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1379 memcpy (buf
+ 2, &offset
, 2);
1380 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1381 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1385 /* Else use a 32-bit relative jump instruction. */
1386 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1387 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1388 memcpy (buf
+ 1, &offset
, 4);
1389 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1390 *jjump_pad_insn_size
= sizeof (jump_insn
);
1393 /* Return the end address of our pad. */
1394 *jump_entry
= buildaddr
;
1400 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1401 CORE_ADDR collector
,
1404 CORE_ADDR
*jump_entry
,
1405 CORE_ADDR
*trampoline
,
1406 ULONGEST
*trampoline_size
,
1407 unsigned char *jjump_pad_insn
,
1408 ULONGEST
*jjump_pad_insn_size
,
1409 CORE_ADDR
*adjusted_insn_addr
,
1410 CORE_ADDR
*adjusted_insn_addr_end
,
1414 if (is_64bit_tdesc ())
1415 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1416 collector
, lockaddr
,
1417 orig_size
, jump_entry
,
1418 trampoline
, trampoline_size
,
1420 jjump_pad_insn_size
,
1422 adjusted_insn_addr_end
,
1426 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1427 collector
, lockaddr
,
1428 orig_size
, jump_entry
,
1429 trampoline
, trampoline_size
,
1431 jjump_pad_insn_size
,
1433 adjusted_insn_addr_end
,
1437 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1441 x86_get_min_fast_tracepoint_insn_len (void)
1443 static int warned_about_fast_tracepoints
= 0;
1446 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1447 used for fast tracepoints. */
1448 if (is_64bit_tdesc ())
1452 if (agent_loaded_p ())
1454 char errbuf
[IPA_BUFSIZ
];
1458 /* On x86, if trampolines are available, then 4-byte jump instructions
1459 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1460 with a 4-byte offset are used instead. */
1461 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
1465 /* GDB has no channel to explain to user why a shorter fast
1466 tracepoint is not possible, but at least make GDBserver
1467 mention that something has gone awry. */
1468 if (!warned_about_fast_tracepoints
)
1470 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
1471 warned_about_fast_tracepoints
= 1;
1478 /* Indicate that the minimum length is currently unknown since the IPA
1479 has not loaded yet. */
1485 add_insns (unsigned char *start
, int len
)
1487 CORE_ADDR buildaddr
= current_insn_ptr
;
1490 debug_printf ("Adding %d bytes of insn at %s\n",
1491 len
, paddress (buildaddr
));
1493 append_insns (&buildaddr
, len
, start
);
1494 current_insn_ptr
= buildaddr
;
1497 /* Our general strategy for emitting code is to avoid specifying raw
1498 bytes whenever possible, and instead copy a block of inline asm
1499 that is embedded in the function. This is a little messy, because
1500 we need to keep the compiler from discarding what looks like dead
1501 code, plus suppress various warnings. */
1503 #define EMIT_ASM(NAME, INSNS) \
1506 extern unsigned char start_ ## NAME, end_ ## NAME; \
1507 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1508 __asm__ ("jmp end_" #NAME "\n" \
1509 "\t" "start_" #NAME ":" \
1511 "\t" "end_" #NAME ":"); \
1516 #define EMIT_ASM32(NAME,INSNS) \
1519 extern unsigned char start_ ## NAME, end_ ## NAME; \
1520 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1521 __asm__ (".code32\n" \
1522 "\t" "jmp end_" #NAME "\n" \
1523 "\t" "start_" #NAME ":\n" \
1525 "\t" "end_" #NAME ":\n" \
1531 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1538 amd64_emit_prologue (void)
1540 EMIT_ASM (amd64_prologue
,
1542 "movq %rsp,%rbp\n\t"
1543 "sub $0x20,%rsp\n\t"
1544 "movq %rdi,-8(%rbp)\n\t"
1545 "movq %rsi,-16(%rbp)");
1550 amd64_emit_epilogue (void)
1552 EMIT_ASM (amd64_epilogue
,
1553 "movq -16(%rbp),%rdi\n\t"
1554 "movq %rax,(%rdi)\n\t"
1561 amd64_emit_add (void)
1563 EMIT_ASM (amd64_add
,
1564 "add (%rsp),%rax\n\t"
1565 "lea 0x8(%rsp),%rsp");
1569 amd64_emit_sub (void)
1571 EMIT_ASM (amd64_sub
,
1572 "sub %rax,(%rsp)\n\t"
1577 amd64_emit_mul (void)
1583 amd64_emit_lsh (void)
1589 amd64_emit_rsh_signed (void)
1595 amd64_emit_rsh_unsigned (void)
1601 amd64_emit_ext (int arg
)
1606 EMIT_ASM (amd64_ext_8
,
1612 EMIT_ASM (amd64_ext_16
,
1617 EMIT_ASM (amd64_ext_32
,
1626 amd64_emit_log_not (void)
1628 EMIT_ASM (amd64_log_not
,
1629 "test %rax,%rax\n\t"
1635 amd64_emit_bit_and (void)
1637 EMIT_ASM (amd64_and
,
1638 "and (%rsp),%rax\n\t"
1639 "lea 0x8(%rsp),%rsp");
1643 amd64_emit_bit_or (void)
1646 "or (%rsp),%rax\n\t"
1647 "lea 0x8(%rsp),%rsp");
1651 amd64_emit_bit_xor (void)
1653 EMIT_ASM (amd64_xor
,
1654 "xor (%rsp),%rax\n\t"
1655 "lea 0x8(%rsp),%rsp");
1659 amd64_emit_bit_not (void)
1661 EMIT_ASM (amd64_bit_not
,
1662 "xorq $0xffffffffffffffff,%rax");
1666 amd64_emit_equal (void)
1668 EMIT_ASM (amd64_equal
,
1669 "cmp %rax,(%rsp)\n\t"
1670 "je .Lamd64_equal_true\n\t"
1672 "jmp .Lamd64_equal_end\n\t"
1673 ".Lamd64_equal_true:\n\t"
1675 ".Lamd64_equal_end:\n\t"
1676 "lea 0x8(%rsp),%rsp");
1680 amd64_emit_less_signed (void)
1682 EMIT_ASM (amd64_less_signed
,
1683 "cmp %rax,(%rsp)\n\t"
1684 "jl .Lamd64_less_signed_true\n\t"
1686 "jmp .Lamd64_less_signed_end\n\t"
1687 ".Lamd64_less_signed_true:\n\t"
1689 ".Lamd64_less_signed_end:\n\t"
1690 "lea 0x8(%rsp),%rsp");
1694 amd64_emit_less_unsigned (void)
1696 EMIT_ASM (amd64_less_unsigned
,
1697 "cmp %rax,(%rsp)\n\t"
1698 "jb .Lamd64_less_unsigned_true\n\t"
1700 "jmp .Lamd64_less_unsigned_end\n\t"
1701 ".Lamd64_less_unsigned_true:\n\t"
1703 ".Lamd64_less_unsigned_end:\n\t"
1704 "lea 0x8(%rsp),%rsp");
1708 amd64_emit_ref (int size
)
1713 EMIT_ASM (amd64_ref1
,
1717 EMIT_ASM (amd64_ref2
,
1721 EMIT_ASM (amd64_ref4
,
1722 "movl (%rax),%eax");
1725 EMIT_ASM (amd64_ref8
,
1726 "movq (%rax),%rax");
1732 amd64_emit_if_goto (int *offset_p
, int *size_p
)
1734 EMIT_ASM (amd64_if_goto
,
1738 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1746 amd64_emit_goto (int *offset_p
, int *size_p
)
1748 EMIT_ASM (amd64_goto
,
1749 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1757 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
1759 int diff
= (to
- (from
+ size
));
1760 unsigned char buf
[sizeof (int)];
1768 memcpy (buf
, &diff
, sizeof (int));
1769 write_inferior_memory (from
, buf
, sizeof (int));
1773 amd64_emit_const (LONGEST num
)
1775 unsigned char buf
[16];
1777 CORE_ADDR buildaddr
= current_insn_ptr
;
1780 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
1781 memcpy (&buf
[i
], &num
, sizeof (num
));
1783 append_insns (&buildaddr
, i
, buf
);
1784 current_insn_ptr
= buildaddr
;
1788 amd64_emit_call (CORE_ADDR fn
)
1790 unsigned char buf
[16];
1792 CORE_ADDR buildaddr
;
1795 /* The destination function being in the shared library, may be
1796 >31-bits away off the compiled code pad. */
1798 buildaddr
= current_insn_ptr
;
1800 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
1804 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
1806 /* Offset is too large for a call. Use callq, but that requires
1807 a register, so avoid it if possible. Use r10, since it is
1808 call-clobbered, we don't have to push/pop it. */
1809 buf
[i
++] = 0x48; /* mov $fn,%r10 */
1811 memcpy (buf
+ i
, &fn
, 8);
1813 buf
[i
++] = 0xff; /* callq *%r10 */
1818 int offset32
= offset64
; /* we know we can't overflow here. */
1820 buf
[i
++] = 0xe8; /* call <reladdr> */
1821 memcpy (buf
+ i
, &offset32
, 4);
1825 append_insns (&buildaddr
, i
, buf
);
1826 current_insn_ptr
= buildaddr
;
1830 amd64_emit_reg (int reg
)
1832 unsigned char buf
[16];
1834 CORE_ADDR buildaddr
;
1836 /* Assume raw_regs is still in %rdi. */
1837 buildaddr
= current_insn_ptr
;
1839 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
1840 memcpy (&buf
[i
], ®
, sizeof (reg
));
1842 append_insns (&buildaddr
, i
, buf
);
1843 current_insn_ptr
= buildaddr
;
1844 amd64_emit_call (get_raw_reg_func_addr ());
1848 amd64_emit_pop (void)
1850 EMIT_ASM (amd64_pop
,
1855 amd64_emit_stack_flush (void)
1857 EMIT_ASM (amd64_stack_flush
,
1862 amd64_emit_zero_ext (int arg
)
1867 EMIT_ASM (amd64_zero_ext_8
,
1871 EMIT_ASM (amd64_zero_ext_16
,
1872 "and $0xffff,%rax");
1875 EMIT_ASM (amd64_zero_ext_32
,
1876 "mov $0xffffffff,%rcx\n\t"
1885 amd64_emit_swap (void)
1887 EMIT_ASM (amd64_swap
,
1894 amd64_emit_stack_adjust (int n
)
1896 unsigned char buf
[16];
1898 CORE_ADDR buildaddr
= current_insn_ptr
;
1901 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
1905 /* This only handles adjustments up to 16, but we don't expect any more. */
1907 append_insns (&buildaddr
, i
, buf
);
1908 current_insn_ptr
= buildaddr
;
1911 /* FN's prototype is `LONGEST(*fn)(int)'. */
1914 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
1916 unsigned char buf
[16];
1918 CORE_ADDR buildaddr
;
1920 buildaddr
= current_insn_ptr
;
1922 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
1923 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
1925 append_insns (&buildaddr
, i
, buf
);
1926 current_insn_ptr
= buildaddr
;
1927 amd64_emit_call (fn
);
1930 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
1933 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
1935 unsigned char buf
[16];
1937 CORE_ADDR buildaddr
;
1939 buildaddr
= current_insn_ptr
;
1941 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
1942 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
1944 append_insns (&buildaddr
, i
, buf
);
1945 current_insn_ptr
= buildaddr
;
1946 EMIT_ASM (amd64_void_call_2_a
,
1947 /* Save away a copy of the stack top. */
1949 /* Also pass top as the second argument. */
1951 amd64_emit_call (fn
);
1952 EMIT_ASM (amd64_void_call_2_b
,
1953 /* Restore the stack top, %rax may have been trashed. */
1958 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
1961 "cmp %rax,(%rsp)\n\t"
1962 "jne .Lamd64_eq_fallthru\n\t"
1963 "lea 0x8(%rsp),%rsp\n\t"
1965 /* jmp, but don't trust the assembler to choose the right jump */
1966 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
1967 ".Lamd64_eq_fallthru:\n\t"
1968 "lea 0x8(%rsp),%rsp\n\t"
1978 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
1981 "cmp %rax,(%rsp)\n\t"
1982 "je .Lamd64_ne_fallthru\n\t"
1983 "lea 0x8(%rsp),%rsp\n\t"
1985 /* jmp, but don't trust the assembler to choose the right jump */
1986 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
1987 ".Lamd64_ne_fallthru:\n\t"
1988 "lea 0x8(%rsp),%rsp\n\t"
1998 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2001 "cmp %rax,(%rsp)\n\t"
2002 "jnl .Lamd64_lt_fallthru\n\t"
2003 "lea 0x8(%rsp),%rsp\n\t"
2005 /* jmp, but don't trust the assembler to choose the right jump */
2006 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2007 ".Lamd64_lt_fallthru:\n\t"
2008 "lea 0x8(%rsp),%rsp\n\t"
2018 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2021 "cmp %rax,(%rsp)\n\t"
2022 "jnle .Lamd64_le_fallthru\n\t"
2023 "lea 0x8(%rsp),%rsp\n\t"
2025 /* jmp, but don't trust the assembler to choose the right jump */
2026 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2027 ".Lamd64_le_fallthru:\n\t"
2028 "lea 0x8(%rsp),%rsp\n\t"
2038 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2041 "cmp %rax,(%rsp)\n\t"
2042 "jng .Lamd64_gt_fallthru\n\t"
2043 "lea 0x8(%rsp),%rsp\n\t"
2045 /* jmp, but don't trust the assembler to choose the right jump */
2046 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2047 ".Lamd64_gt_fallthru:\n\t"
2048 "lea 0x8(%rsp),%rsp\n\t"
2058 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2061 "cmp %rax,(%rsp)\n\t"
2062 "jnge .Lamd64_ge_fallthru\n\t"
2063 ".Lamd64_ge_jump:\n\t"
2064 "lea 0x8(%rsp),%rsp\n\t"
2066 /* jmp, but don't trust the assembler to choose the right jump */
2067 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2068 ".Lamd64_ge_fallthru:\n\t"
2069 "lea 0x8(%rsp),%rsp\n\t"
2078 struct emit_ops amd64_emit_ops
=
2080 amd64_emit_prologue
,
2081 amd64_emit_epilogue
,
2086 amd64_emit_rsh_signed
,
2087 amd64_emit_rsh_unsigned
,
2095 amd64_emit_less_signed
,
2096 amd64_emit_less_unsigned
,
2100 amd64_write_goto_address
,
2105 amd64_emit_stack_flush
,
2106 amd64_emit_zero_ext
,
2108 amd64_emit_stack_adjust
,
2109 amd64_emit_int_call_1
,
2110 amd64_emit_void_call_2
,
2119 #endif /* __x86_64__ */
2122 i386_emit_prologue (void)
2124 EMIT_ASM32 (i386_prologue
,
2128 /* At this point, the raw regs base address is at 8(%ebp), and the
2129 value pointer is at 12(%ebp). */
2133 i386_emit_epilogue (void)
2135 EMIT_ASM32 (i386_epilogue
,
2136 "mov 12(%ebp),%ecx\n\t"
2137 "mov %eax,(%ecx)\n\t"
2138 "mov %ebx,0x4(%ecx)\n\t"
2146 i386_emit_add (void)
2148 EMIT_ASM32 (i386_add
,
2149 "add (%esp),%eax\n\t"
2150 "adc 0x4(%esp),%ebx\n\t"
2151 "lea 0x8(%esp),%esp");
2155 i386_emit_sub (void)
2157 EMIT_ASM32 (i386_sub
,
2158 "subl %eax,(%esp)\n\t"
2159 "sbbl %ebx,4(%esp)\n\t"
2165 i386_emit_mul (void)
2171 i386_emit_lsh (void)
2177 i386_emit_rsh_signed (void)
2183 i386_emit_rsh_unsigned (void)
2189 i386_emit_ext (int arg
)
2194 EMIT_ASM32 (i386_ext_8
,
2197 "movl %eax,%ebx\n\t"
2201 EMIT_ASM32 (i386_ext_16
,
2203 "movl %eax,%ebx\n\t"
2207 EMIT_ASM32 (i386_ext_32
,
2208 "movl %eax,%ebx\n\t"
2217 i386_emit_log_not (void)
2219 EMIT_ASM32 (i386_log_not
,
2221 "test %eax,%eax\n\t"
2228 i386_emit_bit_and (void)
2230 EMIT_ASM32 (i386_and
,
2231 "and (%esp),%eax\n\t"
2232 "and 0x4(%esp),%ebx\n\t"
2233 "lea 0x8(%esp),%esp");
2237 i386_emit_bit_or (void)
2239 EMIT_ASM32 (i386_or
,
2240 "or (%esp),%eax\n\t"
2241 "or 0x4(%esp),%ebx\n\t"
2242 "lea 0x8(%esp),%esp");
2246 i386_emit_bit_xor (void)
2248 EMIT_ASM32 (i386_xor
,
2249 "xor (%esp),%eax\n\t"
2250 "xor 0x4(%esp),%ebx\n\t"
2251 "lea 0x8(%esp),%esp");
2255 i386_emit_bit_not (void)
2257 EMIT_ASM32 (i386_bit_not
,
2258 "xor $0xffffffff,%eax\n\t"
2259 "xor $0xffffffff,%ebx\n\t");
2263 i386_emit_equal (void)
2265 EMIT_ASM32 (i386_equal
,
2266 "cmpl %ebx,4(%esp)\n\t"
2267 "jne .Li386_equal_false\n\t"
2268 "cmpl %eax,(%esp)\n\t"
2269 "je .Li386_equal_true\n\t"
2270 ".Li386_equal_false:\n\t"
2272 "jmp .Li386_equal_end\n\t"
2273 ".Li386_equal_true:\n\t"
2275 ".Li386_equal_end:\n\t"
2277 "lea 0x8(%esp),%esp");
2281 i386_emit_less_signed (void)
2283 EMIT_ASM32 (i386_less_signed
,
2284 "cmpl %ebx,4(%esp)\n\t"
2285 "jl .Li386_less_signed_true\n\t"
2286 "jne .Li386_less_signed_false\n\t"
2287 "cmpl %eax,(%esp)\n\t"
2288 "jl .Li386_less_signed_true\n\t"
2289 ".Li386_less_signed_false:\n\t"
2291 "jmp .Li386_less_signed_end\n\t"
2292 ".Li386_less_signed_true:\n\t"
2294 ".Li386_less_signed_end:\n\t"
2296 "lea 0x8(%esp),%esp");
2300 i386_emit_less_unsigned (void)
2302 EMIT_ASM32 (i386_less_unsigned
,
2303 "cmpl %ebx,4(%esp)\n\t"
2304 "jb .Li386_less_unsigned_true\n\t"
2305 "jne .Li386_less_unsigned_false\n\t"
2306 "cmpl %eax,(%esp)\n\t"
2307 "jb .Li386_less_unsigned_true\n\t"
2308 ".Li386_less_unsigned_false:\n\t"
2310 "jmp .Li386_less_unsigned_end\n\t"
2311 ".Li386_less_unsigned_true:\n\t"
2313 ".Li386_less_unsigned_end:\n\t"
2315 "lea 0x8(%esp),%esp");
2319 i386_emit_ref (int size
)
2324 EMIT_ASM32 (i386_ref1
,
2328 EMIT_ASM32 (i386_ref2
,
2332 EMIT_ASM32 (i386_ref4
,
2333 "movl (%eax),%eax");
2336 EMIT_ASM32 (i386_ref8
,
2337 "movl 4(%eax),%ebx\n\t"
2338 "movl (%eax),%eax");
2344 i386_emit_if_goto (int *offset_p
, int *size_p
)
2346 EMIT_ASM32 (i386_if_goto
,
2352 /* Don't trust the assembler to choose the right jump */
2353 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2356 *offset_p
= 11; /* be sure that this matches the sequence above */
2362 i386_emit_goto (int *offset_p
, int *size_p
)
2364 EMIT_ASM32 (i386_goto
,
2365 /* Don't trust the assembler to choose the right jump */
2366 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2374 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2376 int diff
= (to
- (from
+ size
));
2377 unsigned char buf
[sizeof (int)];
2379 /* We're only doing 4-byte sizes at the moment. */
2386 memcpy (buf
, &diff
, sizeof (int));
2387 write_inferior_memory (from
, buf
, sizeof (int));
2391 i386_emit_const (LONGEST num
)
2393 unsigned char buf
[16];
2395 CORE_ADDR buildaddr
= current_insn_ptr
;
2398 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2399 lo
= num
& 0xffffffff;
2400 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2402 hi
= ((num
>> 32) & 0xffffffff);
2405 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2406 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2411 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2413 append_insns (&buildaddr
, i
, buf
);
2414 current_insn_ptr
= buildaddr
;
2418 i386_emit_call (CORE_ADDR fn
)
2420 unsigned char buf
[16];
2422 CORE_ADDR buildaddr
;
2424 buildaddr
= current_insn_ptr
;
2426 buf
[i
++] = 0xe8; /* call <reladdr> */
2427 offset
= ((int) fn
) - (buildaddr
+ 5);
2428 memcpy (buf
+ 1, &offset
, 4);
2429 append_insns (&buildaddr
, 5, buf
);
2430 current_insn_ptr
= buildaddr
;
2434 i386_emit_reg (int reg
)
2436 unsigned char buf
[16];
2438 CORE_ADDR buildaddr
;
2440 EMIT_ASM32 (i386_reg_a
,
2442 buildaddr
= current_insn_ptr
;
2444 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2445 memcpy (&buf
[i
], ®
, sizeof (reg
));
2447 append_insns (&buildaddr
, i
, buf
);
2448 current_insn_ptr
= buildaddr
;
2449 EMIT_ASM32 (i386_reg_b
,
2450 "mov %eax,4(%esp)\n\t"
2451 "mov 8(%ebp),%eax\n\t"
2453 i386_emit_call (get_raw_reg_func_addr ());
2454 EMIT_ASM32 (i386_reg_c
,
2456 "lea 0x8(%esp),%esp");
2460 i386_emit_pop (void)
2462 EMIT_ASM32 (i386_pop
,
2468 i386_emit_stack_flush (void)
2470 EMIT_ASM32 (i386_stack_flush
,
2476 i386_emit_zero_ext (int arg
)
2481 EMIT_ASM32 (i386_zero_ext_8
,
2482 "and $0xff,%eax\n\t"
2486 EMIT_ASM32 (i386_zero_ext_16
,
2487 "and $0xffff,%eax\n\t"
2491 EMIT_ASM32 (i386_zero_ext_32
,
2500 i386_emit_swap (void)
2502 EMIT_ASM32 (i386_swap
,
2512 i386_emit_stack_adjust (int n
)
2514 unsigned char buf
[16];
2516 CORE_ADDR buildaddr
= current_insn_ptr
;
2519 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2523 append_insns (&buildaddr
, i
, buf
);
2524 current_insn_ptr
= buildaddr
;
2527 /* FN's prototype is `LONGEST(*fn)(int)'. */
2530 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2532 unsigned char buf
[16];
2534 CORE_ADDR buildaddr
;
2536 EMIT_ASM32 (i386_int_call_1_a
,
2537 /* Reserve a bit of stack space. */
2539 /* Put the one argument on the stack. */
2540 buildaddr
= current_insn_ptr
;
2542 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2545 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2547 append_insns (&buildaddr
, i
, buf
);
2548 current_insn_ptr
= buildaddr
;
2549 i386_emit_call (fn
);
2550 EMIT_ASM32 (i386_int_call_1_c
,
2552 "lea 0x8(%esp),%esp");
2555 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2558 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2560 unsigned char buf
[16];
2562 CORE_ADDR buildaddr
;
2564 EMIT_ASM32 (i386_void_call_2_a
,
2565 /* Preserve %eax only; we don't have to worry about %ebx. */
2567 /* Reserve a bit of stack space for arguments. */
2568 "sub $0x10,%esp\n\t"
2569 /* Copy "top" to the second argument position. (Note that
2570 we can't assume function won't scribble on its
2571 arguments, so don't try to restore from this.) */
2572 "mov %eax,4(%esp)\n\t"
2573 "mov %ebx,8(%esp)");
2574 /* Put the first argument on the stack. */
2575 buildaddr
= current_insn_ptr
;
2577 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2580 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2582 append_insns (&buildaddr
, i
, buf
);
2583 current_insn_ptr
= buildaddr
;
2584 i386_emit_call (fn
);
2585 EMIT_ASM32 (i386_void_call_2_b
,
2586 "lea 0x10(%esp),%esp\n\t"
2587 /* Restore original stack top. */
2593 i386_emit_eq_goto (int *offset_p
, int *size_p
)
2596 /* Check low half first, more likely to be decider */
2597 "cmpl %eax,(%esp)\n\t"
2598 "jne .Leq_fallthru\n\t"
2599 "cmpl %ebx,4(%esp)\n\t"
2600 "jne .Leq_fallthru\n\t"
2601 "lea 0x8(%esp),%esp\n\t"
2604 /* jmp, but don't trust the assembler to choose the right jump */
2605 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2606 ".Leq_fallthru:\n\t"
2607 "lea 0x8(%esp),%esp\n\t"
2618 i386_emit_ne_goto (int *offset_p
, int *size_p
)
2621 /* Check low half first, more likely to be decider */
2622 "cmpl %eax,(%esp)\n\t"
2624 "cmpl %ebx,4(%esp)\n\t"
2625 "je .Lne_fallthru\n\t"
2627 "lea 0x8(%esp),%esp\n\t"
2630 /* jmp, but don't trust the assembler to choose the right jump */
2631 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2632 ".Lne_fallthru:\n\t"
2633 "lea 0x8(%esp),%esp\n\t"
2644 i386_emit_lt_goto (int *offset_p
, int *size_p
)
2647 "cmpl %ebx,4(%esp)\n\t"
2649 "jne .Llt_fallthru\n\t"
2650 "cmpl %eax,(%esp)\n\t"
2651 "jnl .Llt_fallthru\n\t"
2653 "lea 0x8(%esp),%esp\n\t"
2656 /* jmp, but don't trust the assembler to choose the right jump */
2657 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2658 ".Llt_fallthru:\n\t"
2659 "lea 0x8(%esp),%esp\n\t"
2670 i386_emit_le_goto (int *offset_p
, int *size_p
)
2673 "cmpl %ebx,4(%esp)\n\t"
2675 "jne .Lle_fallthru\n\t"
2676 "cmpl %eax,(%esp)\n\t"
2677 "jnle .Lle_fallthru\n\t"
2679 "lea 0x8(%esp),%esp\n\t"
2682 /* jmp, but don't trust the assembler to choose the right jump */
2683 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2684 ".Lle_fallthru:\n\t"
2685 "lea 0x8(%esp),%esp\n\t"
2696 i386_emit_gt_goto (int *offset_p
, int *size_p
)
2699 "cmpl %ebx,4(%esp)\n\t"
2701 "jne .Lgt_fallthru\n\t"
2702 "cmpl %eax,(%esp)\n\t"
2703 "jng .Lgt_fallthru\n\t"
2705 "lea 0x8(%esp),%esp\n\t"
2708 /* jmp, but don't trust the assembler to choose the right jump */
2709 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2710 ".Lgt_fallthru:\n\t"
2711 "lea 0x8(%esp),%esp\n\t"
2722 i386_emit_ge_goto (int *offset_p
, int *size_p
)
2725 "cmpl %ebx,4(%esp)\n\t"
2727 "jne .Lge_fallthru\n\t"
2728 "cmpl %eax,(%esp)\n\t"
2729 "jnge .Lge_fallthru\n\t"
2731 "lea 0x8(%esp),%esp\n\t"
2734 /* jmp, but don't trust the assembler to choose the right jump */
2735 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2736 ".Lge_fallthru:\n\t"
2737 "lea 0x8(%esp),%esp\n\t"
2747 struct emit_ops i386_emit_ops
=
2755 i386_emit_rsh_signed
,
2756 i386_emit_rsh_unsigned
,
2764 i386_emit_less_signed
,
2765 i386_emit_less_unsigned
,
2769 i386_write_goto_address
,
2774 i386_emit_stack_flush
,
2777 i386_emit_stack_adjust
,
2778 i386_emit_int_call_1
,
2779 i386_emit_void_call_2
,
2789 static struct emit_ops
*
2793 if (is_64bit_tdesc ())
2794 return &amd64_emit_ops
;
2797 return &i386_emit_ops
;
2800 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2802 static const gdb_byte
*
2803 x86_sw_breakpoint_from_kind (int kind
, int *size
)
2805 *size
= x86_breakpoint_len
;
2806 return x86_breakpoint
;
/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
/* Implementation of linux_target_ops method "supports_hardware_single_step".
   */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
2825 x86_get_ipa_tdesc_idx (void)
2827 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
2828 const struct target_desc
*tdesc
= regcache
->tdesc
;
2831 return amd64_get_ipa_tdesc_idx (tdesc
);
2834 if (tdesc
== tdesc_i386_linux_no_xml
)
2835 return X86_TDESC_SSE
;
2837 return i386_get_ipa_tdesc_idx (tdesc
);
2840 /* This is initialized assuming an amd64 target.
2841 x86_arch_setup will correct it for i386 or amd64 targets. */
2843 struct linux_target_ops the_low_target
=
2846 x86_linux_regs_info
,
2847 x86_cannot_fetch_register
,
2848 x86_cannot_store_register
,
2849 NULL
, /* fetch_register */
2852 NULL
, /* breakpoint_kind_from_pc */
2853 x86_sw_breakpoint_from_kind
,
2857 x86_supports_z_point_type
,
2860 x86_stopped_by_watchpoint
,
2861 x86_stopped_data_address
,
2862 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2863 native i386 case (no registers smaller than an xfer unit), and are not
2864 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2867 /* need to fix up i386 siginfo if host is amd64 */
2869 x86_linux_new_process
,
2870 x86_linux_delete_process
,
2871 x86_linux_new_thread
,
2872 x86_linux_delete_thread
,
2874 x86_linux_prepare_to_resume
,
2875 x86_linux_process_qsupported
,
2876 x86_supports_tracepoints
,
2877 x86_get_thread_area
,
2878 x86_install_fast_tracepoint_jump_pad
,
2880 x86_get_min_fast_tracepoint_insn_len
,
2881 x86_supports_range_stepping
,
2882 NULL
, /* breakpoint_kind_from_current_state */
2883 x86_supports_hardware_single_step
,
2884 x86_get_syscall_trapinfo
,
2885 x86_get_ipa_tdesc_idx
,
2889 initialize_low_arch (void)
2891 /* Initialize the Linux target descriptions. */
2893 tdesc_amd64_linux_no_xml
= allocate_target_description ();
2894 copy_target_description (tdesc_amd64_linux_no_xml
,
2895 amd64_linux_read_description (X86_XSTATE_SSE_MASK
,
2897 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
2901 initialize_low_tdesc ();
2904 tdesc_i386_linux_no_xml
= allocate_target_description ();
2905 copy_target_description (tdesc_i386_linux_no_xml
,
2906 i386_linux_read_description (X86_XSTATE_SSE_MASK
));
2907 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
2909 initialize_regsets_info (&x86_regsets_info
);