/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2024 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#include "arch/amd64-linux-tdesc.h"
#else
#include "nat/i386-linux.h"
#endif

#include "arch/i386-linux-tdesc.h"
#include "arch/x86-linux-tdesc-features.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "nat/x86-linux-tdesc.h"

#ifdef __x86_64__
static target_desc_up tdesc_amd64_linux_no_xml;
#endif
static target_desc_up tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
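
/* For reference: 0xe9 is the opcode of the x86 `jmp rel32' instruction
   (5 bytes in total), and the 0x66 operand-size prefix turns it into
   `jmp rel16' (4 bytes in total).  The zeroed displacement bytes are
   patched in later, once the jump target is known.  */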

/* Backward compatibility for gdb without XML support.  */

static const char xmltarget_i386_linux_no_xml[] = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char xmltarget_amd64_linux_no_xml[] = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (gdb::array_view<const char * const> features) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  int get_ipa_tdesc_idx () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
private:

  /* Update the target description of all processes; a new GDB
     connected, and it may or may not support XML target
     descriptions.  */
  void update_xmltarget ();
};

/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  21 * 8, 22 * 8,
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1					/* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if THREAD belongs to an x86-64 process, per the tdesc.  */

static int
is_64bit_tdesc (thread_info *thread)
{
  struct regcache *regcache = get_thread_regcache (thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
  gdb_assert (lwp != nullptr);
  int use_64bit = is_64bit_tdesc (get_lwp_thread (lwp));

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
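
/* A note on the PTRACE_GET_THREAD_AREA call above: the four unsigned
   ints written into DESC correspond to the kernel's struct user_desc,
   whose second word (desc[1]) is the segment's base address, which is
   all we need here.  */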

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
  lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
  gdb_assert (lwp != nullptr);
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc (get_lwp_thread (lwp));

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
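
/* In the 32-bit path above, %gs holds a segment selector: bits 0-1 are
   the requested privilege level and bit 2 the GDT/LDT table indicator,
   so shifting right by three (REG_THREAD_AREA) yields the descriptor
   table index that PTRACE_GET_THREAD_AREA expects.  */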

bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

static void
collect_register_i386 (struct regcache *regcache, int regno, void *buf)
{
  collect_register (regcache, regno, buf);

#ifdef __x86_64__
  /* In case of x86_64 -m32, collect_register only writes 4 bytes, but the
     space reserved in buf for the register is 8 bytes.  Make sure the entire
     reserved space is initialized.  */

  gdb_assert (register_size (regcache->tdesc, regno) == 4);

  if (regno == RAX)
    {
      /* Sign extend EAX value to avoid potential syscall restart
	 problems.

	 See amd64_linux_collect_native_gregset() in
	 gdb/amd64-linux-nat.c for a detailed explanation.  */
      *(int64_t *) buf = *(int32_t *) buf;
    }
  else
    {
      /* Zero-extend.  */
      *(uint64_t *) buf = *(uint32_t *) buf;
    }
#endif
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register_i386 (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* Handle ORIG_EAX, which is not in i386_regmap.  */
  collect_register_i386 (regcache, find_regno (regcache->tdesc, "orig_eax"),
			 ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}

static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
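
/* 0xCC above is the single-byte `int3' trap instruction; using the
   one-byte form (rather than `int $3', 0xCD 0x03) is what lets the
   breakpoint be a one-byte overwrite.  */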

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}

/* Target routine for new_fork.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc (current_thread))
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}

static int use_xml;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description ()
{
  int tid = lwpid_of (current_thread);

  /* If we are not allowed to send an XML target description then we need
     to use the hard-wired target descriptions.  This corresponds to GDB's
     default machine for x86.

     This check needs to occur before any return statements that might
     generate some alternative target descriptions.  */
  if (!use_xml)
    {
      x86_linux_arch_size arch_size = x86_linux_ptrace_get_arch_size (tid);
      bool is_64bit = arch_size.is_64bit ();
      bool is_x32 = arch_size.is_x32 ();

      if (sizeof (void *) == 4 && is_64bit && !is_x32)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));

#ifdef __x86_64__
      if (is_64bit && !is_x32)
	return tdesc_amd64_linux_no_xml.get ();
      else
#endif
	return tdesc_i386_linux_no_xml.get ();
    }

  /* If have_ptrace_getregset is changed to true by calling
     x86_linux_tdesc_for_tid then we will perform some additional
     initialisation.  */
  bool have_ptrace_getregset_was_unknown
    = have_ptrace_getregset == TRIBOOL_UNKNOWN;

  /* Get pointers to where we should store the xcr0 and xsave_layout
     values.  These will be filled in by x86_linux_tdesc_for_tid the first
     time that the function is called.  Subsequent calls will not modify
     the stored values.  */
  std::pair<uint64_t *, x86_xsave_layout *> storage
    = i387_get_xsave_storage ();

  const target_desc *tdesc
    = x86_linux_tdesc_for_tid (tid, storage.first, storage.second);

  if (have_ptrace_getregset_was_unknown
      && have_ptrace_getregset == TRIBOOL_TRUE)
    {
      int xsave_len = x86_xsave_length ();

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset_info *regset = x86_regsets;
	   regset->fill_function != nullptr;
	   regset++)
	{
	  if (regset->get_request == PTRACE_GETREGSET)
	    regset->size = xsave_len;
	  else if (regset->type != GENERAL_REGS)
	    regset->size = 0;
	}
    }

  return tdesc;
}

/* Update the target description of all processes; a new GDB
   connected, and it may or may not support XML target descriptions.  */

void
x86_target::update_xmltarget ()
{
  scoped_restore_current_thread restore_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    switch_to_thread (find_any_thread_of_pid (pid));

    low_arch_setup ();
  });
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

void
x86_target::process_qsupported (gdb::array_view<const char * const> features)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;

  for (const char *feature : features)
    {
      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }

  update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

bool
x86_target::low_supports_catch_syscall ()
{
  return true;
}

/* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
   code.  This should only be called if LWP got a SYSCALL_SIGTRAP.  */

void
x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

bool
x86_target::supports_tracepoints ()
{
  return true;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
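
/* For illustration, push_opcode simply parses a string of hex byte
   values, so

     i += push_opcode (&buf[i], "48 89 e6");

   appends the three bytes 0x48 0x89 0xe6 (mov %rsp,%rsi) to BUF and
   advances I by 3.  */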

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

int
x86_target::get_min_fast_tracepoint_insn_len ()
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc (current_thread))
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  threads_debug_printf ("Adding %d bytes of insn at %s",
			len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
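
/* As a sketch of how this is used: a call such as

     EMIT_ASM (amd64_pop, "pop %rax");

   embeds the inline asm in this function's own body, bracketed by the
   start_amd64_pop/end_amd64_pop labels, and then copies those bytes
   into the compiled bytecode via add_insns; the leading jmp keeps the
   bracketed bytes from executing in place.  */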

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}
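
/* The raw .byte sequence 0x0f 0x85 above is `jne rel32' with a zeroed
   displacement; *OFFSET_P records where in the emitted bytes the
   displacement starts (here, byte 10), so that the goto address can be
   patched in later via amd64_write_goto_address.  */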

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in the shared library, may be
     more than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq through a
	 call-clobbered register instead, so we don't have to
	 push/pop it.  (Note the bytes below encode %rdx, not %r10
	 as older comments claimed.)  */
      buf[i++] = 0x48; /* mov $fn,%rdx */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%rdx */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
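
/* For reference, the 0x48 0x8d 0x64 0x24 sequence above encodes
   `lea disp8(%rsp),%rsp'; the adjustment N * 8 must fit in the single
   signed displacement byte, which is why only small adjustments are
   representable.  */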

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
2180 static void
2181 i386_emit_prologue (void)
2183 EMIT_ASM32 (i386_prologue,
2184 "push %ebp\n\t"
2185 "mov %esp,%ebp\n\t"
2186 "push %ebx");
2187 /* At this point, the raw regs base address is at 8(%ebp), and the
2188 value pointer is at 12(%ebp). */
2191 static void
2192 i386_emit_epilogue (void)
2194 EMIT_ASM32 (i386_epilogue,
2195 "mov 12(%ebp),%ecx\n\t"
2196 "mov %eax,(%ecx)\n\t"
2197 "mov %ebx,0x4(%ecx)\n\t"
2198 "xor %eax,%eax\n\t"
2199 "pop %ebx\n\t"
2200 "pop %ebp\n\t"
2201 "ret");
2204 static void
2205 i386_emit_add (void)
2207 EMIT_ASM32 (i386_add,
2208 "add (%esp),%eax\n\t"
2209 "adc 0x4(%esp),%ebx\n\t"
2210 "lea 0x8(%esp),%esp");
2213 static void
2214 i386_emit_sub (void)
2216 EMIT_ASM32 (i386_sub,
2217 "subl %eax,(%esp)\n\t"
2218 "sbbl %ebx,4(%esp)\n\t"
2219 "pop %eax\n\t"
2220 "pop %ebx\n\t");
static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

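/* Sign-extend the low ARG bits of the top-of-stack value to 64 bits;
   the final "sarl $31,%ebx" replicates %eax's sign bit through the
   high half.  */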
static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
                  "cbtw\n\t"
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
              "or %ebx,%eax\n\t"
              "test %eax,%eax\n\t"
              "sete %cl\n\t"
              "xor %ebx,%ebx\n\t"
              "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
              "and (%esp),%eax\n\t"
              "and 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
              "or (%esp),%eax\n\t"
              "or 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
              "xor (%esp),%eax\n\t"
              "xor 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
              "xor $0xffffffff,%eax\n\t"
              "xor $0xffffffff,%ebx");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Li386_equal_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "je .Li386_equal_true\n\t"
              ".Li386_equal_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_equal_end\n\t"
              ".Li386_equal_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_equal_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              "jne .Li386_less_signed_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              /* The high halves were equal, so the low halves decide
                 and must be compared unsigned.  */
              "jb .Li386_less_signed_true\n\t"
              ".Li386_less_signed_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_signed_end\n\t"
              ".Li386_less_signed_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_signed_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
              "cmpl %ebx,4(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              "jne .Li386_less_unsigned_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              ".Li386_less_unsigned_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_unsigned_end\n\t"
              ".Li386_less_unsigned_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_unsigned_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
                  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
                  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
                  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
                  "movl 4(%eax),%ebx\n\t"
                  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
              "mov %eax,%ecx\n\t"
              "or %ebx,%ecx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "cmpl $0,%ecx\n\t"
              /* Don't trust the assembler to choose the right jump */
              ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
              /* Don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

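/* Patch the 32-bit displacement at FROM so that the jump emitted by
   one of the goto emitters lands at TO.  FROM points at the
   displacement itself, so the jump's origin is FROM + SIZE.  */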
static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
              "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
              "mov %eax,4(%esp)\n\t"
              "mov 8(%ebp),%eax\n\t"
              "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
              "push %ebx\n\t"
              "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
                  "and $0xff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
                  "and $0xffff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
                  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
              "mov %eax,%ecx\n\t"
              "mov %ebx,%edx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "push %edx\n\t"
              "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
              /* Reserve a bit of stack space.  */
              "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
              "mov %edx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
              /* Preserve %eax only; we don't have to worry about %ebx.  */
              "push %eax\n\t"
              /* Reserve a bit of stack space for arguments.  */
              "sub $0x10,%esp\n\t"
              /* Copy "top" to the second argument position.  (Note that
                 we can't assume function won't scribble on its
                 arguments, so don't try to restore from this.)  */
              "mov %eax,4(%esp)\n\t"
              "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
              "lea 0x10(%esp),%esp\n\t"
              /* Restore original stack top.  */
              "pop %eax");
}

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
              /* Check low half first, more likely to be decider */
              "cmpl %eax,(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Leq_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
              /* Check low half first, more likely to be decider */
              "cmpl %eax,(%esp)\n\t"
              "jne .Lne_jump\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "je .Lne_fallthru\n\t"
              ".Lne_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lne_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Llt_jump\n\t"
              "jne .Llt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              /* High halves equal: the low halves decide, unsigned.  */
              "jnb .Llt_fallthru\n\t"
              ".Llt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Llt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
              "cmpl %ebx,4(%esp)\n\t"
              /* Only a strictly smaller high half decides by itself;
                 equal high halves still need the low-half compare.  */
              "jl .Lle_jump\n\t"
              "jne .Lle_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              /* High halves equal: the low halves decide, unsigned.  */
              "jnbe .Lle_fallthru\n\t"
              ".Lle_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lle_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
              "cmpl %ebx,4(%esp)\n\t"
              "jg .Lgt_jump\n\t"
              "jne .Lgt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              /* High halves equal: the low halves decide, unsigned.  */
              "jna .Lgt_fallthru\n\t"
              ".Lgt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lgt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
              "cmpl %ebx,4(%esp)\n\t"
              /* Only a strictly greater high half decides by itself;
                 equal high halves still need the low-half compare.  */
              "jg .Lge_jump\n\t"
              "jne .Lge_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              /* High halves equal: the low halves decide, unsigned.  */
              "jnae .Lge_fallthru\n\t"
              ".Lge_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lge_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

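/* The emit_ops vtable used when compiling agent expressions into
   native code on i386 targets.  */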
static emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };

emit_ops *
x86_target::emit_ops ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

bool
x86_target::low_supports_range_stepping ()
{
  return true;
}

int
x86_target::get_ipa_tdesc_idx ()
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

  if (!use_xml)
    {
      /* If USE_XML is false then we should be using one of these target
         descriptions, see x86_linux_read_description for where we choose
         one of these.  Both of these descriptions are created from this
         fixed xcr0 value X86_XSTATE_SSE_MASK.  */
      gdb_assert (tdesc == tdesc_i386_linux_no_xml.get ()
#ifdef __x86_64__
                  || tdesc == tdesc_amd64_linux_no_xml.get ()
#endif /* __x86_64__ */
                  );
      return x86_linux_xcr0_to_tdesc_idx (X86_XSTATE_SSE_MASK);
    }

  /* The xcr0 value and xsave layout value are cached when the target
     description is read.  Grab their cache location, and use the cached
     value to calculate a tdesc index.  */
  std::pair<uint64_t *, x86_xsave_layout *> storage
    = i387_get_xsave_storage ();
  uint64_t xcr0 = *storage.first;

  return x86_linux_xcr0_to_tdesc_idx (xcr0);
}

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml.get (),
                           amd64_linux_read_description (X86_XSTATE_SSE_MASK,
                                                         false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml.get (),
                           i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}