/* Internal interfaces for the GNU/Linux specific target code for gdbserver.
   Copyright (C) 2002-2021 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#ifndef GDBSERVER_LINUX_LOW_H
#define GDBSERVER_LINUX_LOW_H

#include "nat/linux-nat.h"
#include "nat/gdb_thread_db.h"
#include <signal.h>

#include "gdbthread.h"
#include "gdb_proc_service.h"

/* Included for ptrace type definitions.  */
#include "nat/linux-ptrace.h"
#include "target/waitstatus.h" /* For enum target_stop_reason.  */
#include "tracepoint.h"

#include <list>
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
typedef void (*regset_fill_func) (struct regcache *, void *);
typedef void (*regset_store_func) (struct regcache *, const void *);

enum regset_type
{
  GENERAL_REGS,
  FP_REGS,
  EXTENDED_REGS,
  OPTIONAL_REGS, /* Do not error if the regset cannot be accessed.  */
};

/* The arch's regsets array initializer must be terminated with a NULL
   regset.  */
#define NULL_REGSET \
  { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }
struct regset_info
{
  int get_request, set_request;
  /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
     argument and the 4th argument should be "const struct iovec *".  */
  int nt_type;
  int size;
  enum regset_type type;
  regset_fill_func fill_function;
  regset_store_func store_function;
};
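
/* As an illustration only (the register set, sizes and callbacks below
   are hypothetical), an arch's regsets array is built from regset_info
   entries and ends with NULL_REGSET:

     static void my_fill_gregset (struct regcache *regcache, void *buf);
     static void my_store_gregset (struct regcache *regcache, const void *buf);

     static struct regset_info my_regsets[] =
       {
         { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
           GENERAL_REGS, my_fill_gregset, my_store_gregset },
         NULL_REGSET
       };  */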

/* Aggregation of all the supported regsets of a given
   architecture/mode.  */

struct regsets_info
{
  /* The regsets array.  */
  struct regset_info *regsets;

  /* The number of regsets in the REGSETS array.  */
  int num_regsets;

  /* If we get EIO on a regset, do not try it again.  Note the set of
     supported regsets may depend on processor mode on biarch
     machines.  This is a (lazily allocated) array holding one boolean
     byte (0/1) per regset, with each element corresponding to the
     regset in the REGSETS array above at the same offset.  */
  char *disabled_regsets;
};

#endif

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */

struct usrregs_info
{
  /* The number of registers accessible.  */
  int num_regs;

  /* The registers map.  */
  int *regmap;
};

/* All info needed to access an architecture/mode's registers.  */

struct regs_info
{
  /* Regset support bitmap: 1 for registers that are transferred as a part
     of a regset, 0 for ones that need to be handled individually.  This
     can be NULL if all registers are transferred with regsets or regsets
     are not supported.  */
  unsigned char *regset_bitmap;

  /* Info used when accessing registers with PTRACE_PEEKUSER /
     PTRACE_POKEUSER.  This can be NULL if all registers are
     transferred with regsets.  */
  struct usrregs_info *usrregs;

#ifdef HAVE_LINUX_REGSETS
  /* Info used when accessing registers with regsets.  */
  struct regsets_info *regsets_info;
#endif
};
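
/* For illustration only (all names below are hypothetical): an arch file
   typically defines static instances of the structures above, lets
   initialize_regsets_info compute num_regsets from a NULL_REGSET
   terminated array such as the my_regsets sketch further up, and hands
   the bundle out via its get_regs_info override:

     static int my_regmap[] = { 0, 8, 16 };

     static struct usrregs_info my_usrregs_info = { 3, my_regmap };

     static struct regsets_info my_regsets_info =
       { my_regsets, 0, NULL };

     static struct regs_info my_regs_info =
       { NULL, &my_usrregs_info, &my_regsets_info };  */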

struct process_info_private
{
  /* Arch-specific additions.  */
  struct arch_process_info *arch_private;

  /* libthread_db-specific additions.  Not NULL if this process has loaded
     thread_db, and it is active.  */
  struct thread_db *thread_db;

  /* &_r_debug.  0 if not yet determined.  -1 if no PT_DYNAMIC in Phdrs.  */
  CORE_ADDR r_debug;
};

struct lwp_info;

/* Target ops definitions for a Linux target.  */

class linux_process_target : public process_stratum_target
{
public:

  int create_inferior (const char *program,
                       const std::vector<char *> &program_args) override;

  void post_create_inferior () override;

  int attach (unsigned long pid) override;

  int kill (process_info *proc) override;

  int detach (process_info *proc) override;

  void mourn (process_info *proc) override;

  void join (int pid) override;

  bool thread_alive (ptid_t pid) override;

  void resume (thread_resume *resume_info, size_t n) override;

  ptid_t wait (ptid_t ptid, target_waitstatus *status,
               target_wait_flags options) override;

  void fetch_registers (regcache *regcache, int regno) override;

  void store_registers (regcache *regcache, int regno) override;

  int prepare_to_access_memory () override;

  void done_accessing_memory () override;

  int read_memory (CORE_ADDR memaddr, unsigned char *myaddr,
                   int len) override;

  int write_memory (CORE_ADDR memaddr, const unsigned char *myaddr,
                    int len) override;

  void look_up_symbols () override;

  void request_interrupt () override;

  bool supports_read_auxv () override;

  int read_auxv (CORE_ADDR offset, unsigned char *myaddr,
                 unsigned int len) override;

  int insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                    int size, raw_breakpoint *bp) override;

  int remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                    int size, raw_breakpoint *bp) override;

  bool stopped_by_sw_breakpoint () override;

  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool supports_stopped_by_hw_breakpoint () override;

  bool supports_hardware_single_step () override;

  bool stopped_by_watchpoint () override;

  CORE_ADDR stopped_data_address () override;

  bool supports_read_offsets () override;

  int read_offsets (CORE_ADDR *text, CORE_ADDR *data) override;

  bool supports_get_tls_address () override;

  int get_tls_address (thread_info *thread, CORE_ADDR offset,
                       CORE_ADDR load_module, CORE_ADDR *address) override;

  bool supports_qxfer_osdata () override;

  int qxfer_osdata (const char *annex, unsigned char *readbuf,
                    unsigned const char *writebuf,
                    CORE_ADDR offset, int len) override;

  bool supports_qxfer_siginfo () override;

  int qxfer_siginfo (const char *annex, unsigned char *readbuf,
                     unsigned const char *writebuf,
                     CORE_ADDR offset, int len) override;

  bool supports_non_stop () override;

  bool async (bool enable) override;

  int start_non_stop (bool enable) override;

  bool supports_multi_process () override;

  bool supports_fork_events () override;

  bool supports_vfork_events () override;

  bool supports_exec_events () override;

  void handle_new_gdb_connection () override;

  int handle_monitor_command (char *mon) override;

  int core_of_thread (ptid_t ptid) override;

#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
  bool supports_read_loadmap () override;

  int read_loadmap (const char *annex, CORE_ADDR offset,
                    unsigned char *myaddr, unsigned int len) override;
#endif

  CORE_ADDR read_pc (regcache *regcache) override;

  void write_pc (regcache *regcache, CORE_ADDR pc) override;

  bool supports_thread_stopped () override;

  bool thread_stopped (thread_info *thread) override;

  void pause_all (bool freeze) override;

  void unpause_all (bool unfreeze) override;

  void stabilize_threads () override;

  bool supports_disable_randomization () override;

  bool supports_qxfer_libraries_svr4 () override;

  int qxfer_libraries_svr4 (const char *annex,
                            unsigned char *readbuf,
                            unsigned const char *writebuf,
                            CORE_ADDR offset, int len) override;

  bool supports_agent () override;

#ifdef HAVE_LINUX_BTRACE
  btrace_target_info *enable_btrace (ptid_t ptid,
                                     const btrace_config *conf) override;

  int disable_btrace (btrace_target_info *tinfo) override;

  int read_btrace (btrace_target_info *tinfo, buffer *buf,
                   enum btrace_read_type type) override;

  int read_btrace_conf (const btrace_target_info *tinfo,
                        buffer *buf) override;
#endif

  bool supports_range_stepping () override;

  bool supports_pid_to_exec_file () override;

  const char *pid_to_exec_file (int pid) override;

  bool supports_multifs () override;

  int multifs_open (int pid, const char *filename, int flags,
                    mode_t mode) override;

  int multifs_unlink (int pid, const char *filename) override;

  ssize_t multifs_readlink (int pid, const char *filename, char *buf,
                            size_t bufsiz) override;

  const char *thread_name (ptid_t thread) override;

#ifdef USE_THREAD_DB
  bool thread_handle (ptid_t ptid, gdb_byte **handle,
                      int *handle_len) override;
#endif

  bool supports_catch_syscall () override;

  /* Return the information to access registers.  This has public
     visibility because proc-service uses it.  */
  virtual const regs_info *get_regs_info () = 0;

private:

  /* Handle a GNU/Linux extended wait response.  If we see a clone,
     fork, or vfork event, we need to add the new LWP to our list
     (and return 0 so as not to report the trap to higher layers).
     If we see an exec event, we will modify ORIG_EVENT_LWP to point
     to a new LWP representing the new program.  */
  int handle_extended_wait (lwp_info **orig_event_lwp, int wstat);

  /* Do low-level handling of the event, and check if this is an event we
     want to report.  If so, store it as a pending status in the lwp_info
     structure corresponding to LWPID.  */
  void filter_event (int lwpid, int wstat);

  /* Wait for an event from child(ren) WAIT_PTID, and return any that
     match FILTER_PTID (leaving others pending).  The PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     were found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                               int *wstatp, int options);

  /* Wait for an event from child(ren) PTID.  PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     were found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event (ptid_t ptid, int *wstatp, int options);

  /* Wait for all children to stop for the SIGSTOPs we just queued.  */
  void wait_for_sigstop ();

  /* Wait for process, returns status.  */
  ptid_t wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
                 target_wait_flags target_options);

  /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
     If SUSPEND, then also increase the suspend count of every LWP,
     except EXCEPT.  */
  void stop_all_lwps (int suspend, lwp_info *except);

  /* Stopped LWPs that the client wanted to be running, that don't have
     pending statuses, are set to run again, except for EXCEPT, if not
     NULL.  This undoes a stop_all_lwps call.  */
  void unstop_all_lwps (int unsuspend, lwp_info *except);

  /* Start a step-over operation on LWP.  When LWP stopped at a
     breakpoint, to make progress, we need to remove the breakpoint out
     of the way.  If we let other threads run while we do that, they may
     pass by the breakpoint location and miss hitting it.  To avoid
     that, a step-over momentarily stops all threads while LWP is
     single-stepped by either hardware or software while the breakpoint
     is temporarily uninserted from the inferior.  When the single-step
     finishes, we reinsert the breakpoint, and let all threads that are
     supposed to be running, run again.  */
  void start_step_over (lwp_info *lwp);

  /* If there's a step over in progress, wait until all threads stop
     (that is, until the stepping thread finishes its step), and
     unsuspend all lwps.  The stepping thread ends with its status
     pending, which is processed later when we get back to processing
     events.  */
  void complete_ongoing_step_over ();

  /* Finish a step-over.  Reinsert the breakpoint we had uninserted in
     start_step_over, if still there, and delete any single-step
     breakpoints we've set, on non hardware single-step targets.
     Return true if step over finished.  */
  bool finish_step_over (lwp_info *lwp);

  /* When we finish a step-over, set threads running again.  If there's
     another thread that may need a step-over, now's the time to start
     it.  Eventually, we'll move all threads past their breakpoints.  */
  void proceed_all_lwps ();

  /* The reason we resume in the caller, is because we want to be able
     to pass lwp->status_pending as WSTAT, and we need to clear
     status_pending_p before resuming, otherwise, resume_one_lwp
     refuses to resume.  */
  bool maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat);

  /* Move THREAD out of the jump pad.  */
  void move_out_of_jump_pad (thread_info *thread);

  /* Call low_arch_setup on THREAD.  */
  void arch_setup_thread (thread_info *thread);

#ifdef HAVE_LINUX_USRREGS
  /* Fetch one register.  */
  void fetch_register (const usrregs_info *usrregs, regcache *regcache,
                       int regno);

  /* Store one register.  */
  void store_register (const usrregs_info *usrregs, regcache *regcache,
                       int regno);
#endif

  /* Fetch all registers, or just one, from the child process.
     If REGNO is -1, do this for all registers, skipping any that are
     assumed to have been retrieved by regsets_fetch_inferior_registers,
     unless ALL is non-zero.
     Otherwise, REGNO specifies which register (so we can save time).  */
  void usr_fetch_inferior_registers (const regs_info *regs_info,
                                     regcache *regcache, int regno, int all);

  /* Store our register values back into the inferior.
     If REGNO is -1, do this for all registers, skipping any that are
     assumed to have been saved by regsets_store_inferior_registers,
     unless ALL is non-zero.
     Otherwise, REGNO specifies which register (so we can save time).  */
  void usr_store_inferior_registers (const regs_info *regs_info,
                                     regcache *regcache, int regno, int all);

  /* Return the PC as read from the regcache of LWP, without any
     adjustment.  */
  CORE_ADDR get_pc (lwp_info *lwp);

  /* Called when the LWP stopped for a signal/trap.  If it stopped for a
     trap check what caused it (breakpoint, watchpoint, trace, etc.),
     and save the result in the LWP's stop_reason field.  If it stopped
     for a breakpoint, decrement the PC if necessary on the lwp's
     architecture.  Returns true if we now have the LWP's stop PC.  */
  bool save_stop_reason (lwp_info *lwp);

  /* Resume execution of LWP.  If STEP is nonzero, single-step it.  If
     SIGNAL is nonzero, give it that signal.  */
  void resume_one_lwp_throw (lwp_info *lwp, int step, int signal,
                             siginfo_t *info);

  /* Like resume_one_lwp_throw, but no error is thrown if the LWP
     disappears while we try to resume it.  */
  void resume_one_lwp (lwp_info *lwp, int step, int signal, siginfo_t *info);

  /* This function is called once per thread.  We check the thread's
     last resume request, which will tell us whether to resume, step, or
     leave the thread stopped.  Any signal the client requested to be
     delivered has already been enqueued at this point.

     If any thread that GDB wants running is stopped at an internal
     breakpoint that needs stepping over, we start a step-over operation
     on that particular thread, and leave all others stopped.  */
  void proceed_one_lwp (thread_info *thread, lwp_info *except);

  /* This function is called once per thread.  We check the thread's
     resume request, which will tell us whether to resume, step, or
     leave the thread stopped; and what signal, if any, it should be
     sent.

     For threads which we aren't explicitly told otherwise, we preserve
     the stepping flag; this is used for stepping over gdbserver-placed
     breakpoints.

     If pending_flags was set in any thread, we queue any needed
     signals, since we won't actually resume.  We already have a pending
     event to report, so we don't need to preserve any step requests;
     they should be re-issued if necessary.  */
  void resume_one_thread (thread_info *thread, bool leave_all_stopped);

  /* Return true if this lwp has an interesting status pending.  */
  bool status_pending_p_callback (thread_info *thread, ptid_t ptid);

  /* Resume LWPs that are currently stopped without any pending status
     to report, but are resumed from the core's perspective.  */
  void resume_stopped_resumed_lwps (thread_info *thread);

  /* Unsuspend THREAD, except EXCEPT, and proceed.  */
  void unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except);

  /* Return true if this lwp still has an interesting status pending.
     If not (e.g., it had stopped for a breakpoint that is gone), return
     false.  */
  bool thread_still_has_status_pending (thread_info *thread);

  /* Return true if this lwp is to-be-resumed and has an interesting
     status pending.  */
  bool resume_status_pending (thread_info *thread);

  /* Return true if this lwp that GDB wants running is stopped at an
     internal breakpoint that we need to step over.  It assumes that
     any required STOP_PC adjustment has already been propagated to
     the inferior's regcache.  */
  bool thread_needs_step_over (thread_info *thread);

  /* Single step via hardware or software single step.
     Return 1 if hardware single stepping, 0 if software single stepping
     or can't single step.  */
  int single_step (lwp_info *lwp);

  /* Return true if THREAD is doing hardware single step.  */
  bool maybe_hw_step (thread_info *thread);

  /* Install breakpoints for software single stepping.  */
  void install_software_single_step_breakpoints (lwp_info *lwp);

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the watchpoint
     and reading back which data address trapped, the user may change
     the set of watchpoints, and, as a consequence, GDB changes the
     debug registers in the inferior.  To avoid reading back a stale
     stopped-data-address when that happens, we cache in LP the fact
     that a watchpoint trapped, and the corresponding data address, as
     soon as we see CHILD stop with a SIGTRAP.  If GDB changes the debug
     registers meanwhile, we have the cached data we can rely on.  */
  bool check_stopped_by_watchpoint (lwp_info *child);

  /* Convert a native/host siginfo object, into/from the siginfo in the
     layout of the inferiors' architecture.  */
  void siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo,
                      int direction);

  /* Add a process to the common process list, and set its private
     data.  */
  process_info *add_linux_process (int pid, int attached);

  /* Add a new thread.  */
  lwp_info *add_lwp (ptid_t ptid);

  /* Delete a thread.  */
  void delete_lwp (lwp_info *lwp);

public: /* Make this public because it's used from outside.  */
  /* Attach to an inferior process.  Returns 0 on success, ERRNO on
     failure.  */
  int attach_lwp (ptid_t ptid);

private: /* Back to private.  */
  /* Detach from LWP.  */
  void detach_one_lwp (lwp_info *lwp);

  /* Detect zombie thread group leaders, and "exit" them.  We can't
     reap their exits until all other threads in the group have
     exited.  */
  void check_zombie_leaders ();

  /* Convenience function that is called when the kernel reports an exit
     event.  This decides whether to report the event to GDB as a
     process exit event, a thread exit event, or to suppress the
     event.  */
  ptid_t filter_exit_event (lwp_info *event_child,
                            target_waitstatus *ourstatus);

  /* Returns true if THREAD is stopped in a jump pad, and we can't
     move it out, because we need to report the stop event to GDB.  For
     example, if the user puts a breakpoint in the jump pad, it's
     because she wants to debug it.  */
  bool stuck_in_jump_pad (thread_info *thread);

  /* Convenience wrapper.  Returns information about LWP's fast tracepoint
     collection status.  */
  fast_tpoint_collect_result linux_fast_tracepoint_collecting
    (lwp_info *lwp, fast_tpoint_collect_status *status);

  /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
     Fill *SYSNO with the syscall nr trapped.  */
  void get_syscall_trapinfo (lwp_info *lwp, int *sysno);

  /* Returns true if GDB is interested in the event_child syscall.
     Only to be called when stopped reason is SYSCALL_SIGTRAP.  */
  bool gdb_catch_this_syscall (lwp_info *event_child);

protected:
  /* The architecture-specific "low" methods are listed below.  */

  /* Architecture-specific setup for the current thread.  */
  virtual void low_arch_setup () = 0;

  /* Return false if we can fetch/store the register, true if we cannot
     fetch/store the register.  */
  virtual bool low_cannot_fetch_register (int regno) = 0;

  virtual bool low_cannot_store_register (int regno) = 0;

  /* Hook to fetch a register in some non-standard way.  Used for
     example by backends that have read-only registers with hardcoded
     values (e.g., IA64's gr0/fr0/fr1).  Returns true if register
     REGNO was supplied, false if not, and we should fallback to the
     standard ptrace methods.  */
  virtual bool low_fetch_register (regcache *regcache, int regno);

  /* Return true if breakpoints are supported.  Such targets must
     implement the GET_PC and SET_PC methods.  */
  virtual bool low_supports_breakpoints ();

  virtual CORE_ADDR low_get_pc (regcache *regcache);

  virtual void low_set_pc (regcache *regcache, CORE_ADDR newpc);

  /* Find the next possible PCs after the current instruction executes.
     Targets that override this method should also override
     'supports_software_single_step' to return true.  */
  virtual std::vector<CORE_ADDR> low_get_next_pcs (regcache *regcache);

  /* Return true if there is a breakpoint at PC.  */
  virtual bool low_breakpoint_at (CORE_ADDR pc) = 0;
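
  /* For example (a sketch only; the subclass name, opcode and one-byte
     breakpoint below are hypothetical), an arch whose software breakpoint
     is a single 0xcc byte could implement this as:

       bool my_target::low_breakpoint_at (CORE_ADDR pc)
       {
         unsigned char insn;

         read_memory (pc, &insn, 1);
         return insn == 0xcc;
       }  */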

  /* Breakpoint and watchpoint related functions.  See target.h for
     comments.  */
  virtual int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
                                int size, raw_breakpoint *bp);

  virtual int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
                                int size, raw_breakpoint *bp);

  virtual bool low_stopped_by_watchpoint ();

  virtual CORE_ADDR low_stopped_data_address ();

  /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
     for registers smaller than an xfer unit).  */
  virtual void low_collect_ptrace_register (regcache *regcache, int regno,
                                            char *buf);

  virtual void low_supply_ptrace_register (regcache *regcache, int regno,
                                           const char *buf);

  /* Hook to convert from target format to ptrace format and back.
     Returns true if any conversion was done; false otherwise.
     If DIRECTION is 1, then copy from INF to NATIVE.
     If DIRECTION is 0, copy from NATIVE to INF.  */
  virtual bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
                                  int direction);

  /* Hook to call when a new process is created or attached to.
     If extra per-process architecture-specific data is needed,
     allocate it here.  */
  virtual arch_process_info *low_new_process ();

  /* Hook to call when a process is being deleted.  If extra per-process
     architecture-specific data is needed, delete it here.  */
  virtual void low_delete_process (arch_process_info *info);

  /* Hook to call when a new thread is detected.
     If extra per-thread architecture-specific data is needed,
     allocate it here.  */
  virtual void low_new_thread (lwp_info *);

  /* Hook to call when a thread is being deleted.  If extra per-thread
     architecture-specific data is needed, delete it here.  */
  virtual void low_delete_thread (arch_lwp_info *);

  /* Hook to call, if any, when a new fork is attached.  */
  virtual void low_new_fork (process_info *parent, process_info *child);

  /* Hook to call prior to resuming a thread.  */
  virtual void low_prepare_to_resume (lwp_info *lwp);

  /* Fill ADDRP with the thread area address of LWPID.  Returns 0 on
     success, -1 on failure.  */
  virtual int low_get_thread_area (int lwpid, CORE_ADDR *addrp);

  /* Returns true if the low target supports range stepping.  */
  virtual bool low_supports_range_stepping ();

  /* Return true if the target supports catch syscall.  Such targets
     override the low_get_syscall_trapinfo method below.  */
  virtual bool low_supports_catch_syscall ();

  /* Fill *SYSNO with the syscall nr trapped.  Only to be called when
     inferior is stopped due to SYSCALL_SIGTRAP.  */
  virtual void low_get_syscall_trapinfo (regcache *regcache, int *sysno);

  /* How many bytes the PC should be decremented after a break.  */
  virtual int low_decr_pc_after_break ();
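
  /* For instance (hypothetical subclass), a port whose breakpoint
     instruction leaves the PC pointing just past the trap, as x86's int3
     does, would typically override this to return that instruction's
     length:

       int my_target::low_decr_pc_after_break () { return 1; }  */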
};

extern linux_process_target *the_linux_target;

#define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
#define get_lwp_thread(lwp) ((lwp)->thread)
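
/* Usage sketch for the accessors above, assuming some thread_info
   pointer THR is at hand:

     struct lwp_info *lwp = get_thread_lwp (thr);
     struct thread_info *thr_again = get_lwp_thread (lwp);  */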

/* Information about a signal that is to be delivered to a thread.  */

struct pending_signal
{
  pending_signal (int signal)
    : signal {signal}
  {};

  int signal;
  siginfo_t info {};
};

/* This struct is recorded in the target_data field of struct thread_info.

   On linux ``all_threads'' is keyed by the LWP ID, which we use as the
   GDB protocol representation of the thread ID.  Threads also have
   a "process ID" (poorly named) which is (presently) the same as the
   LWP ID.

   There is also ``all_processes'', which is keyed by the "overall
   process ID", which GNU/Linux calls tgid, "thread group ID".  */

struct lwp_info
{
  /* Backlink to the parent object.  */
  struct thread_info *thread = nullptr;

  /* If this flag is set, the next SIGSTOP will be ignored (the
     process will be immediately resumed).  This means that either we
     sent the SIGSTOP to it ourselves and got some other pending event
     (so the SIGSTOP is still pending), or that we stopped the
     inferior implicitly via PTRACE_ATTACH and have not waited for it
     yet.  */
  int stop_expected = 0;

  /* When this is true, we shall not try to resume this thread, even
     if last_resume_kind isn't resume_stop.  */
  int suspended = 0;

  /* If this flag is set, the lwp is known to be stopped right now (stop
     event already received in a wait()).  */
  int stopped = 0;

  /* Signal whether we are in a SYSCALL_ENTRY or
     in a SYSCALL_RETURN event.
     Values:
     - TARGET_WAITKIND_SYSCALL_ENTRY
     - TARGET_WAITKIND_SYSCALL_RETURN */
  enum target_waitkind syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  /* When stopped is set, the last wait status recorded for this lwp.  */
  int last_status = 0;

  /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
     this LWP's last event, to pass to GDB without any further
     processing.  This is used to store extended ptrace event
     information or exit status until it can be reported to GDB.  */
  struct target_waitstatus waitstatus;

  /* A pointer to the fork child/parent relative.  Valid only while
     the parent fork event is not reported to higher layers.  Used to
     avoid wildcard vCont actions resuming a fork child before GDB is
     notified about the parent's fork event.  */
  struct lwp_info *fork_relative = nullptr;

  /* When stopped is set, this is where the lwp last stopped, with
     decr_pc_after_break already accounted for.  If the LWP is
     running, this is the address at which the lwp was resumed.  */
  CORE_ADDR stop_pc = 0;

  /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
     been reported to GDB.  */
  int status_pending_p = 0;
  int status_pending = 0;

  /* The reason the LWP last stopped, if we need to track it
     (breakpoint, watchpoint, etc.)  */
  enum target_stop_reason stop_reason = TARGET_STOPPED_BY_NO_REASON;

  /* On architectures where it is possible to know the data address of
     a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
     contains such data address.  Only valid if STOPPED_BY_WATCHPOINT
     is true.  */
  CORE_ADDR stopped_data_address = 0;

  /* If this is non-zero, it is a breakpoint to be reinserted at our next
     stop (SIGTRAP stops only).  */
  CORE_ADDR bp_reinsert = 0;

  /* If this flag is set, the last continue operation at the ptrace
     level on this process was a single-step.  */
  int stepping = 0;

  /* Range to single step within.  This is a copy of the step range
     passed along the last resume request.  See 'struct
     thread_resume'.  */
  CORE_ADDR step_range_start = 0;  /* Inclusive */
  CORE_ADDR step_range_end = 0;    /* Exclusive */

  /* If this flag is set, we need to set the event request flags the
     next time we see this LWP stop.  */
  int must_set_ptrace_flags = 0;

  /* A chain of signals that need to be delivered to this process.  */
  std::list<pending_signal> pending_signals;

  /* A link used when resuming.  It is initialized from the resume request,
     and then processed and cleared in linux_resume_one_lwp.  */
  struct thread_resume *resume = nullptr;

  /* Information about this lwp's fast tracepoint collection status (is it
     currently stopped in the jump pad, and if so, before or at/after the
     relocated instruction).  Normally, we won't care about this, but we
     will if a signal arrives to this lwp while it is collecting.  */
  fast_tpoint_collect_result collecting_fast_tracepoint
    = fast_tpoint_collect_result::not_collecting;

  /* A chain of signals that need to be reported to GDB.  These were
     deferred because the thread was doing a fast tracepoint collect
     when they arrived.  */
  std::list<pending_signal> pending_signals_to_report;

  /* When collecting_fast_tracepoint is first found to be 1, we insert
     an exit-jump-pad-quickly breakpoint.  This is it.  */
  struct breakpoint *exit_jump_pad_bkpt = nullptr;

#ifdef USE_THREAD_DB
  int thread_known = 0;
  /* The thread handle, used for e.g. TLS access.  Only valid if
     THREAD_KNOWN is set.  */
  td_thrhandle_t th {};

  /* The pthread_t handle.  */
  thread_t thread_handle {};
#endif

  /* Arch-specific additions.  */
  struct arch_lwp_info *arch_private = nullptr;
};

int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);

/* Attach to PTID.  Returns 0 on success, non-zero otherwise (an
   errno).  */
int linux_attach_lwp (ptid_t ptid);

struct lwp_info *find_lwp_pid (ptid_t ptid);
/* For linux_stop_lwp see nat/linux-nat.h.  */

#ifdef HAVE_LINUX_REGSETS
void initialize_regsets_info (struct regsets_info *regsets_info);
#endif

void initialize_low_arch (void);

void linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_32bit (struct regcache *regcache);

void linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_64bit (struct regcache *regcache);

/* From thread-db.c  */
int thread_db_init (void);
void thread_db_detach (struct process_info *);
void thread_db_mourn (struct process_info *);
int thread_db_handle_monitor_command (char *);
int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
                               CORE_ADDR load_module, CORE_ADDR *address);
int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);

/* Called from linux-low.c when a clone event is detected.  Upon entry,
   both the clone and the parent should be stopped.  This function does
   whatever is required to have the clone under thread_db's control.  */
void thread_db_notice_clone (struct thread_info *parent_thr, ptid_t child_ptid);

bool thread_db_thread_handle (ptid_t ptid, gdb_byte **handle, int *handle_len);

extern int have_ptrace_getregset;

/* Search for the value with type MATCH in the auxv vector with
   entries of length WORDSIZE bytes.  If found, store the value in
   *VALP and return 1.  If not found or if there is an error, return
   0.  */
int linux_get_auxv (int wordsize, CORE_ADDR match,
                    CORE_ADDR *valp);

/* Fetch the AT_HWCAP entry from the auxv vector, where entries are length
   WORDSIZE.  If no entry was found, return zero.  */
CORE_ADDR linux_get_hwcap (int wordsize);

/* Fetch the AT_HWCAP2 entry from the auxv vector, where entries are length
   WORDSIZE.  If no entry was found, return zero.  */
CORE_ADDR linux_get_hwcap2 (int wordsize);
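
/* Usage sketch (the caller context and is-64-bit check are hypothetical):
   the WORDSIZE argument is the size of an auxv entry word in bytes, which
   callers usually derive from whether the inferior is a 64-bit process:

     unsigned int machine;
     int is_elf64 = linux_pid_exe_is_elf_64_file (pid, &machine);
     CORE_ADDR hwcap = linux_get_hwcap (is_elf64 ? 8 : 4);
     CORE_ADDR hwcap2 = linux_get_hwcap2 (is_elf64 ? 8 : 4);  */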

#endif /* GDBSERVER_LINUX_LOW_H */