/* Select target systems and architectures at runtime for GDB.

   Copyright (C) 1990-2024 Free Software Foundation, Inc.

   Contributed by Cygnus Support.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "extract-store-integer.h"
#include "target-dcache.h"
#include "cli/cli-cmds.h"
#include "observable.h"
#include "target-descriptions.h"
#include "gdbthread.h"
#include "inline-frame.h"
#include "tracepoint.h"
#include "gdbsupport/fileio.h"
#include "gdbsupport/agent.h"
#include "target-debug.h"
#include "event-top.h"
#include "gdbsupport/byte-vector.h"
#include "gdbsupport/search.h"
#include <unordered_map>
#include "target-connection.h"
#include "cli/cli-decode.h"
#include "cli/cli-style.h"

[[noreturn]] static void generic_tls_error (void);

static void default_rcmd (struct target_ops *, const char *, struct ui_file *);

static int default_verify_memory (struct target_ops *self,
                                  const gdb_byte *data,
                                  CORE_ADDR memaddr, ULONGEST size);

[[noreturn]] static void tcomplain (void);

/* Mapping between target_info objects (which have address identity)
   and corresponding open/factory function/callback.  Each add_target
   call adds one entry to this map, and registers a "target
   TARGET_NAME" command that when invoked calls the factory registered
   here.  The target_info object is associated with the command via
   the command's context.  */
static std::unordered_map<const target_info *, target_open_ftype *>
  target_factories;

/* The singleton debug target.  */

static struct target_ops *the_debug_target;

/* Command list for target.  */

static struct cmd_list_element *targetlist = NULL;

bool trust_readonly = false;

/* Nonzero if we should show true memory content including
   memory breakpoints inserted by GDB.  */

static int show_memory_breakpoints = 0;

/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertent disruption, such as in non-stop mode.  */

bool may_write_registers = true;

bool may_write_memory = true;

bool may_insert_breakpoints = true;

bool may_insert_tracepoints = true;

bool may_insert_fast_tracepoints = true;

bool may_stop = true;

/* Non-zero if we want to see trace of target level stuff.  */

static unsigned int targetdebug = 0;

/* Print a "target" debug statement with the function name prefix.  */

#define target_debug_printf(fmt, ...) \
  debug_prefixed_printf_cond (targetdebug > 0, "target", fmt, ##__VA_ARGS__)

/* Print a "target" debug statement without the function name prefix.  */

#define target_debug_printf_nofunc(fmt, ...) \
  debug_prefixed_printf_cond_nofunc (targetdebug > 0, "target", fmt, ##__VA_ARGS__)

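/* For example (an illustrative sketch, not code from this file), a
   target method implementation might emit:

     target_debug_printf ("resuming %s", target_pid_to_str (ptid).c_str ());

   which, when "set debug target" is non-zero, prints a line prefixed
   with "[target]" and the name of the calling function.  */
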
static void
set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
{
  if (targetdebug)
    current_inferior ()->push_target (the_debug_target);
  else
    current_inferior ()->unpush_target (the_debug_target);
}

static void
show_targetdebug (struct ui_file *file, int from_tty,
                  struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Target debugging is %s.\n"), value);
}

bool
target_has_memory ()
{
  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    if (t->has_memory ())
      return true;

  return false;
}

bool
target_has_stack ()
{
  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    if (t->has_stack ())
      return true;

  return false;
}

bool
target_has_registers ()
{
  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    if (t->has_registers ())
      return true;

  return false;
}

bool
target_has_execution (inferior *inf)
{
  if (inf == nullptr)
    inf = current_inferior ();

  for (target_ops *t = inf->top_target ();
       t != nullptr;
       t = inf->find_target_beneath (t))
    if (t->has_execution (inf))
      return true;

  return false;
}

const char *
target_shortname ()
{
  return current_inferior ()->top_target ()->shortname ();
}

bool
target_attach_no_wait ()
{
  return current_inferior ()->top_target ()->attach_no_wait ();
}

void
target_post_attach (int pid)
{
  return current_inferior ()->top_target ()->post_attach (pid);
}

void
target_prepare_to_store (regcache *regcache)
{
  return current_inferior ()->top_target ()->prepare_to_store (regcache);
}

bool
target_supports_enable_disable_tracepoint ()
{
  target_ops *target = current_inferior ()->top_target ();

  return target->supports_enable_disable_tracepoint ();
}

bool
target_supports_string_tracing ()
{
  return current_inferior ()->top_target ()->supports_string_tracing ();
}

bool
target_supports_evaluation_of_breakpoint_conditions ()
{
  target_ops *target = current_inferior ()->top_target ();

  return target->supports_evaluation_of_breakpoint_conditions ();
}

bool
target_supports_dumpcore ()
{
  return current_inferior ()->top_target ()->supports_dumpcore ();
}

void
target_dumpcore (const char *filename)
{
  return current_inferior ()->top_target ()->dumpcore (filename);
}

bool
target_can_run_breakpoint_commands ()
{
  return current_inferior ()->top_target ()->can_run_breakpoint_commands ();
}

void
target_files_info ()
{
  return current_inferior ()->top_target ()->files_info ();
}

int
target_insert_fork_catchpoint (int pid)
{
  return current_inferior ()->top_target ()->insert_fork_catchpoint (pid);
}

int
target_remove_fork_catchpoint (int pid)
{
  return current_inferior ()->top_target ()->remove_fork_catchpoint (pid);
}

int
target_insert_vfork_catchpoint (int pid)
{
  return current_inferior ()->top_target ()->insert_vfork_catchpoint (pid);
}

int
target_remove_vfork_catchpoint (int pid)
{
  return current_inferior ()->top_target ()->remove_vfork_catchpoint (pid);
}

int
target_insert_exec_catchpoint (int pid)
{
  return current_inferior ()->top_target ()->insert_exec_catchpoint (pid);
}

int
target_remove_exec_catchpoint (int pid)
{
  return current_inferior ()->top_target ()->remove_exec_catchpoint (pid);
}

int
target_set_syscall_catchpoint (int pid, bool needed, int any_count,
                               gdb::array_view<const int> syscall_counts)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->set_syscall_catchpoint (pid, needed, any_count,
                                         syscall_counts);
}

void
target_rcmd (const char *command, struct ui_file *outbuf)
{
  return current_inferior ()->top_target ()->rcmd (command, outbuf);
}

bool
target_can_lock_scheduler ()
{
  target_ops *target = current_inferior ()->top_target ();

  return (target->get_thread_control_capabilities () & tc_schedlock) != 0;
}

bool
target_can_async_p ()
{
  return target_can_async_p (current_inferior ()->top_target ());
}

bool
target_can_async_p (struct target_ops *target)
{
  if (!target_async_permitted)
    return false;
  return target->can_async_p ();
}

bool
target_is_async_p ()
{
  bool result = current_inferior ()->top_target ()->is_async_p ();
  gdb_assert (target_async_permitted || !result);
  return result;
}

enum exec_direction_kind
target_execution_direction ()
{
  return current_inferior ()->top_target ()->execution_direction ();
}

const char *
target_extra_thread_info (thread_info *tp)
{
  return current_inferior ()->top_target ()->extra_thread_info (tp);
}

const char *
target_pid_to_exec_file (int pid)
{
  return current_inferior ()->top_target ()->pid_to_exec_file (pid);
}

struct gdbarch *
target_thread_architecture (ptid_t ptid)
{
  return current_inferior ()->top_target ()->thread_architecture (ptid);
}

int
target_find_memory_regions (find_memory_region_ftype func, void *data)
{
  return current_inferior ()->top_target ()->find_memory_regions (func, data);
}

gdb::unique_xmalloc_ptr<char>
target_make_corefile_notes (bfd *bfd, int *size_p)
{
  return current_inferior ()->top_target ()->make_corefile_notes (bfd, size_p);
}

gdb_byte *
target_get_bookmark (const char *args, int from_tty)
{
  return current_inferior ()->top_target ()->get_bookmark (args, from_tty);
}

void
target_goto_bookmark (const gdb_byte *arg, int from_tty)
{
  return current_inferior ()->top_target ()->goto_bookmark (arg, from_tty);
}

bool
target_stopped_by_watchpoint ()
{
  return current_inferior ()->top_target ()->stopped_by_watchpoint ();
}

bool
target_stopped_by_sw_breakpoint ()
{
  return current_inferior ()->top_target ()->stopped_by_sw_breakpoint ();
}

bool
target_supports_stopped_by_sw_breakpoint ()
{
  target_ops *target = current_inferior ()->top_target ();

  return target->supports_stopped_by_sw_breakpoint ();
}

bool
target_stopped_by_hw_breakpoint ()
{
  return current_inferior ()->top_target ()->stopped_by_hw_breakpoint ();
}

bool
target_supports_stopped_by_hw_breakpoint ()
{
  target_ops *target = current_inferior ()->top_target ();

  return target->supports_stopped_by_hw_breakpoint ();
}

bool
target_have_steppable_watchpoint ()
{
  return current_inferior ()->top_target ()->have_steppable_watchpoint ();
}

int
target_can_use_hardware_watchpoint (bptype type, int cnt, int othertype)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->can_use_hw_breakpoint (type, cnt, othertype);
}

int
target_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->region_ok_for_hw_watchpoint (addr, len);
}

int
target_can_do_single_step ()
{
  return current_inferior ()->top_target ()->can_do_single_step ();
}

int
target_insert_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
                          expression *cond)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->insert_watchpoint (addr, len, type, cond);
}

int
target_remove_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
                          expression *cond)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->remove_watchpoint (addr, len, type, cond);
}

int
target_insert_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->insert_hw_breakpoint (gdbarch, bp_tgt);
}

int
target_remove_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->remove_hw_breakpoint (gdbarch, bp_tgt);
}

bool
target_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int type,
                                       expression *cond)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->can_accel_watchpoint_condition (addr, len, type, cond);
}

bool
target_can_execute_reverse ()
{
  return current_inferior ()->top_target ()->can_execute_reverse ();
}

ptid_t
target_get_ada_task_ptid (long lwp, ULONGEST tid)
{
  return current_inferior ()->top_target ()->get_ada_task_ptid (lwp, tid);
}

bool
target_filesystem_is_local ()
{
  return current_inferior ()->top_target ()->filesystem_is_local ();
}

void
target_trace_init ()
{
  return current_inferior ()->top_target ()->trace_init ();
}

void
target_download_tracepoint (bp_location *location)
{
  return current_inferior ()->top_target ()->download_tracepoint (location);
}

bool
target_can_download_tracepoint ()
{
  return current_inferior ()->top_target ()->can_download_tracepoint ();
}

void
target_download_trace_state_variable (const trace_state_variable &tsv)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->download_trace_state_variable (tsv);
}

void
target_enable_tracepoint (bp_location *loc)
{
  return current_inferior ()->top_target ()->enable_tracepoint (loc);
}

void
target_disable_tracepoint (bp_location *loc)
{
  return current_inferior ()->top_target ()->disable_tracepoint (loc);
}

void
target_trace_start ()
{
  return current_inferior ()->top_target ()->trace_start ();
}

void
target_trace_set_readonly_regions ()
{
  return current_inferior ()->top_target ()->trace_set_readonly_regions ();
}

int
target_get_trace_status (trace_status *ts)
{
  return current_inferior ()->top_target ()->get_trace_status (ts);
}

void
target_get_tracepoint_status (tracepoint *tp, uploaded_tp *utp)
{
  return current_inferior ()->top_target ()->get_tracepoint_status (tp, utp);
}

void
target_trace_stop ()
{
  return current_inferior ()->top_target ()->trace_stop ();
}

int
target_trace_find (trace_find_type type, int num,
                   CORE_ADDR addr1, CORE_ADDR addr2, int *tpp)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->trace_find (type, num, addr1, addr2, tpp);
}

bool
target_get_trace_state_variable_value (int tsv, LONGEST *val)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->get_trace_state_variable_value (tsv, val);
}

int
target_save_trace_data (const char *filename)
{
  return current_inferior ()->top_target ()->save_trace_data (filename);
}

int
target_upload_tracepoints (uploaded_tp **utpp)
{
  return current_inferior ()->top_target ()->upload_tracepoints (utpp);
}

int
target_upload_trace_state_variables (uploaded_tsv **utsvp)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->upload_trace_state_variables (utsvp);
}

LONGEST
target_get_raw_trace_data (gdb_byte *buf, ULONGEST offset, LONGEST len)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->get_raw_trace_data (buf, offset, len);
}

int
target_get_min_fast_tracepoint_insn_len ()
{
  target_ops *target = current_inferior ()->top_target ();

  return target->get_min_fast_tracepoint_insn_len ();
}

void
target_set_disconnected_tracing (int val)
{
  return current_inferior ()->top_target ()->set_disconnected_tracing (val);
}

void
target_set_circular_trace_buffer (int val)
{
  return current_inferior ()->top_target ()->set_circular_trace_buffer (val);
}

void
target_set_trace_buffer_size (LONGEST val)
{
  return current_inferior ()->top_target ()->set_trace_buffer_size (val);
}

bool
target_set_trace_notes (const char *user, const char *notes,
                        const char *stopnotes)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->set_trace_notes (user, notes, stopnotes);
}

bool
target_get_tib_address (ptid_t ptid, CORE_ADDR *addr)
{
  return current_inferior ()->top_target ()->get_tib_address (ptid, addr);
}

void
target_set_permissions ()
{
  return current_inferior ()->top_target ()->set_permissions ();
}

bool
target_static_tracepoint_marker_at (CORE_ADDR addr,
                                    static_tracepoint_marker *marker)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->static_tracepoint_marker_at (addr, marker);
}

std::vector<static_tracepoint_marker>
target_static_tracepoint_markers_by_strid (const char *marker_id)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->static_tracepoint_markers_by_strid (marker_id);
}

traceframe_info_up
target_traceframe_info ()
{
  return current_inferior ()->top_target ()->traceframe_info ();
}

bool
target_use_agent (bool use)
{
  return current_inferior ()->top_target ()->use_agent (use);
}

bool
target_can_use_agent ()
{
  return current_inferior ()->top_target ()->can_use_agent ();
}

bool
target_augmented_libraries_svr4_read ()
{
  return current_inferior ()->top_target ()->augmented_libraries_svr4_read ();
}

bool
target_supports_memory_tagging ()
{
  return current_inferior ()->top_target ()->supports_memory_tagging ();
}

bool
target_fetch_memtags (CORE_ADDR address, size_t len, gdb::byte_vector &tags,
                      int type)
{
  return current_inferior ()->top_target ()->fetch_memtags (address, len,
                                                            tags, type);
}

bool
target_store_memtags (CORE_ADDR address, size_t len,
                      const gdb::byte_vector &tags, int type)
{
  return current_inferior ()->top_target ()->store_memtags (address, len,
                                                            tags, type);
}

bool
target_is_address_tagged (gdbarch *gdbarch, CORE_ADDR address)
{
  return current_inferior ()->top_target ()->is_address_tagged (gdbarch,
                                                                address);
}

x86_xsave_layout
target_fetch_x86_xsave_layout ()
{
  return current_inferior ()->top_target ()->fetch_x86_xsave_layout ();
}

void
target_log_command (const char *p)
{
  return current_inferior ()->top_target ()->log_command (p);
}

/* This is used to implement the various target commands.  */

static void
open_target (const char *args, int from_tty, struct cmd_list_element *command)
{
  auto *ti = static_cast<target_info *> (command->context ());
  target_open_ftype *func = target_factories[ti];

  target_debug_printf_nofunc ("-> %s->open (...)", ti->shortname);

  func (args, from_tty);

  target_debug_printf_nofunc ("<- %s->open (%s, %d)", ti->shortname, args,
                              from_tty);
}

void
add_target (const target_info &t, target_open_ftype *func,
            completer_ftype *completer)
{
  struct cmd_list_element *c;

  auto &func_slot = target_factories[&t];
  if (func_slot != nullptr)
    internal_error (_("target already added (\"%s\")."), t.shortname);
  func_slot = func;

  if (targetlist == NULL)
    add_basic_prefix_cmd ("target", class_run, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol.  For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
                          &targetlist, 0, &cmdlist);
  c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
  c->set_context ((void *) &t);
  c->func = open_target;
  if (completer != NULL)
    set_cmd_completer (c, completer);
}

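/* Illustrative sketch of how a backend registers itself with add_target;
   the names "example_target_info", "example_target_open" and
   "_initialize_example_target" are hypothetical, not part of this file:

     static const target_info example_target_info =
     {
       "example",
       N_("Example target"),
       N_("Use `target example HOST' to debug an example board.")
     };

     static void
     example_target_open (const char *args, int from_tty)
     {
       ...
     }

     void
     _initialize_example_target ()
     {
       add_target (example_target_info, example_target_open);
     }

   This creates a "target example" command; invoking it looks up
   example_target_open in target_factories via open_target above.  */
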
void
add_deprecated_target_alias (const target_info &tinfo, const char *alias)
{
  struct cmd_list_element *c;

  /* If we use add_alias_cmd here, we do not get the deprecated warning.  */
  c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
  c->func = open_target;
  c->set_context ((void *) &tinfo);
  gdb::unique_xmalloc_ptr<char> alt
    = xstrprintf ("target %s", tinfo.shortname);
  deprecate_cmd (c, alt.release ());
}

void
target_kill (void)
{
  /* If the commit_resume_state of the to-be-killed-inferior's process stratum
     is true, and this inferior is the last live inferior with resumed threads
     of that target, then we want to leave commit_resume_state to false, as the
     target won't have any resumed threads anymore.  We achieve this with
     this scoped_disable_commit_resumed.  On construction, it will set the flag
     to false.  On destruction, it will only set it to true if there are resumed
     threads left.  */
  scoped_disable_commit_resumed disable ("killing");
  current_inferior ()->top_target ()->kill ();
}

void
target_load (const char *arg, int from_tty)
{
  target_dcache_invalidate (current_program_space->aspace);
  current_inferior ()->top_target ()->load (arg, from_tty);
}

target_terminal_state target_terminal::m_terminal_state
  = target_terminal_state::is_ours;

/* See target/target.h.  */

void
target_terminal::init (void)
{
  current_inferior ()->top_target ()->terminal_init ();

  m_terminal_state = target_terminal_state::is_ours;
}

/* See target/target.h.  */

void
target_terminal::inferior (void)
{
  struct ui *ui = current_ui;

  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  */
  if (ui->prompt_state != PROMPT_BLOCKED)
    return;

  /* Since we always run the inferior in the main console (unless "set
     inferior-tty" is in effect), when some UI other than the main one
     calls target_terminal::inferior, then we leave the main UI's
     terminal settings as is.  */
  if (ui != main_ui)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */

  struct inferior *inf = current_inferior ();

  if (inf->terminal_state != target_terminal_state::is_inferior)
    {
      current_inferior ()->top_target ()->terminal_inferior ();
      inf->terminal_state = target_terminal_state::is_inferior;
    }

  m_terminal_state = target_terminal_state::is_inferior;

  /* If the user hit C-c before, pretend that it was hit right
     here.  */
  if (check_quit_flag ())
    target_pass_ctrlc ();
}

/* See target/target.h.  */

void
target_terminal::restore_inferior (void)
{
  struct ui *ui = current_ui;

  /* See target_terminal::inferior().  */
  if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
    return;

  /* Restore the terminal settings of inferiors that were in the
     foreground but are now ours_for_output due to a temporary
     target_target::ours_for_output() call.  */

  {
    scoped_restore_current_inferior restore_inferior;

    for (::inferior *inf : all_inferiors ())
      {
        if (inf->terminal_state == target_terminal_state::is_ours_for_output)
          {
            set_current_inferior (inf);
            current_inferior ()->top_target ()->terminal_inferior ();
            inf->terminal_state = target_terminal_state::is_inferior;
          }
      }
  }

  m_terminal_state = target_terminal_state::is_inferior;

  /* If the user hit C-c before, pretend that it was hit right
     here.  */
  if (check_quit_flag ())
    target_pass_ctrlc ();
}

/* Switch terminal state to DESIRED_STATE, either is_ours, or
   is_ours_for_output.  */

static void
target_terminal_is_ours_kind (target_terminal_state desired_state)
{
  scoped_restore_current_inferior restore_inferior;

  /* Must do this in two passes.  First, have all inferiors save the
     current terminal settings.  Then, after all inferiors have had a
     chance to safely save the terminal settings, restore GDB's
     terminal settings.  */

  for (inferior *inf : all_inferiors ())
    {
      if (inf->terminal_state == target_terminal_state::is_inferior)
        {
          set_current_inferior (inf);
          current_inferior ()->top_target ()->terminal_save_inferior ();
        }
    }

  for (inferior *inf : all_inferiors ())
    {
      /* Note we don't check is_inferior here like above because we
         need to handle 'is_ours_for_output -> is_ours' too.  Careful
         to never transition from 'is_ours' to 'is_ours_for_output',
         though.  */
      if (inf->terminal_state != target_terminal_state::is_ours
          && inf->terminal_state != desired_state)
        {
          set_current_inferior (inf);
          if (desired_state == target_terminal_state::is_ours)
            current_inferior ()->top_target ()->terminal_ours ();
          else if (desired_state == target_terminal_state::is_ours_for_output)
            current_inferior ()->top_target ()->terminal_ours_for_output ();
          else
            gdb_assert_not_reached ("unhandled desired state");
          inf->terminal_state = desired_state;
        }
    }
}

/* See target/target.h.  */

void
target_terminal::ours ()
{
  struct ui *ui = current_ui;

  /* See target_terminal::inferior.  */
  if (ui != main_ui)
    return;

  if (m_terminal_state == target_terminal_state::is_ours)
    return;

  target_terminal_is_ours_kind (target_terminal_state::is_ours);
  m_terminal_state = target_terminal_state::is_ours;
}

/* See target/target.h.  */

void
target_terminal::ours_for_output ()
{
  struct ui *ui = current_ui;

  /* See target_terminal::inferior.  */
  if (ui != main_ui)
    return;

  if (!target_terminal::is_inferior ())
    return;

  target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
  target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
}

/* See target/target.h.  */

void
target_terminal::info (const char *arg, int from_tty)
{
  current_inferior ()->top_target ()->terminal_info (arg, from_tty);
}

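/* Illustrative sketch (not code from this file) of the usual pairing:
   code that prints output while the inferior owns the terminal
   temporarily reclaims it for output only, e.g.

     target_terminal::scoped_restore_terminal_state term_state;
     target_terminal::ours_for_output ();
     gdb_printf (...);

   and the scoped restore object puts the terminal state back when it
   goes out of scope.  */
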
static bool
target_supports_terminal_ours (void)
{
  /* The current top target is the target at the top of the target
     stack of the current inferior.  While normally there's always an
     inferior, we must check for nullptr here because we can get here
     very early during startup, before the initial inferior is first
     created.  */
  inferior *inf = current_inferior ();

  if (inf == nullptr)
    return false;
  return inf->top_target ()->supports_terminal_ours ();
}

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
         current_inferior ()->top_target ()->shortname ());
}

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  gdb_printf (_("No saved terminal information.\n"));
}

/* A default implementation for the to_get_ada_task_ptid target method.

   This function builds the PTID by using both LWP and TID as part of
   the PTID lwp and tid elements.  The pid used is the pid of the
   inferior_ptid.  */

static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, ULONGEST tid)
{
  return ptid_t (inferior_ptid.pid (), lwp, tid);
}

static enum exec_direction_kind
default_execution_direction (struct target_ops *self)
{
  if (!target_can_execute_reverse ())
    return EXEC_FORWARD;
  else if (!target_can_async_p ())
    return EXEC_FORWARD;
  else
    gdb_assert_not_reached ("\
to_execution_direction must be implemented for reverse async");
}

void
target_ops_ref_policy::decref (target_ops *t)
{
  t->decref ();
  if (t->refcount () == 0)
    {
      if (t->stratum () == process_stratum)
        connection_list_remove (as_process_stratum_target (t));

      for (inferior *inf : all_inferiors ())
        gdb_assert (!inf->target_is_pushed (t));

      fileio_handles_invalidate_target (t);

      t->close ();

      target_debug_printf_nofunc ("closing target");
    }
}

void
target_stack::push (target_ops *t)
{
  /* We must create a new reference first.  It is possible that T is
     already pushed on this target stack, in which case we will first
     unpush it below, before re-pushing it.  If we don't increment the
     reference count now, then when we unpush it, we might end up deleting
     T, which is not good.  */
  auto ref = target_ops_ref::new_reference (t);

  strata stratum = t->stratum ();

  /* If there's already a target at this stratum, remove it.  */

  if (m_stack[stratum].get () != nullptr)
    unpush (m_stack[stratum].get ());

  /* Now add the new one.  */
  m_stack[stratum] = std::move (ref);

  if (m_top < stratum)
    m_top = stratum;

  if (stratum == process_stratum)
    connection_list_add (as_process_stratum_target (t));
}

bool
target_stack::unpush (target_ops *t)
{
  gdb_assert (t != NULL);

  strata stratum = t->stratum ();

  if (stratum == dummy_stratum)
    internal_error (_("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that a target can only occur
     once in the target stack.  */

  if (m_stack[stratum] != t)
    {
      /* If T wasn't pushed, quit.  Only open targets should be
         closed.  */
      return false;
    }

  if (m_top == stratum)
    m_top = this->find_beneath (t)->stratum ();

  /* Move the target reference off the target stack, this sets the pointer
     held in m_stack to nullptr, and places the reference in ref.  When
     ref goes out of scope its reference count will be decremented, which
     might cause the target to close.

     We have to do it this way, and not just set the value in m_stack to
     nullptr directly, because doing so would decrement the reference
     count first, which might close the target, and closing the target
     does a check that the target is not on any inferiors target_stack.  */
  auto ref = std::move (m_stack[stratum]);

  return true;
}

void
target_unpusher::operator() (struct target_ops *ops) const
{
  current_inferior ()->unpush_target (ops);
}

/* Default implementation of to_get_thread_local_address.  */

static void
generic_tls_error (void)
{
  throw_error (TLS_GENERIC_ERROR,
               _("Cannot find thread-local variables on this target"));
}

/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  */

CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  volatile CORE_ADDR addr = 0;
  struct target_ops *target = current_inferior ()->top_target ();
  gdbarch *gdbarch = current_inferior ()->arch ();

  /* If OBJFILE is a separate debug object file, look for the
     original object file.  */
  if (objfile->separate_debug_objfile_backlink != NULL)
    objfile = objfile->separate_debug_objfile_backlink;

  if (gdbarch_fetch_tls_load_module_address_p (gdbarch))
    {
      ptid_t ptid = inferior_ptid;

      try
        {
          CORE_ADDR lm_addr;

          /* Fetch the load module address for this objfile.  */
          lm_addr = gdbarch_fetch_tls_load_module_address (gdbarch,
                                                           objfile);

          if (gdbarch_get_thread_local_address_p (gdbarch))
            addr = gdbarch_get_thread_local_address (gdbarch, ptid, lm_addr,
                                                     offset);
          else
            addr = target->get_thread_local_address (ptid, lm_addr, offset);
        }
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      catch (const gdb_exception &ex)
        {
          int objfile_is_library = (objfile->flags & OBJF_SHARED);

          switch (ex.error)
            {
            case TLS_NO_LIBRARY_SUPPORT_ERROR:
              error (_("Cannot find thread-local variables "
                       "in this thread library."));
              break;
            case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
              if (objfile_is_library)
                error (_("Cannot find shared library `%s' in dynamic"
                         " linker's load module list"), objfile_name (objfile));
              else
                error (_("Cannot find executable file `%s' in dynamic"
                         " linker's load module list"), objfile_name (objfile));
              break;
            case TLS_NOT_ALLOCATED_YET_ERROR:
              if (objfile_is_library)
                error (_("The inferior has not yet allocated storage for"
                         " thread-local variables in\n"
                         "the shared library `%s'\n"
                         "for %s"),
                       objfile_name (objfile),
                       target_pid_to_str (ptid).c_str ());
              else
                error (_("The inferior has not yet allocated storage for"
                         " thread-local variables in\n"
                         "the executable `%s'\n"
                         "for %s"),
                       objfile_name (objfile),
                       target_pid_to_str (ptid).c_str ());
              break;
            case TLS_GENERIC_ERROR:
              if (objfile_is_library)
                error (_("Cannot find thread-local storage for %s, "
                         "shared library %s:\n%s"),
                       target_pid_to_str (ptid).c_str (),
                       objfile_name (objfile), ex.what ());
              else
                error (_("Cannot find thread-local storage for %s, "
                         "executable file %s:\n%s"),
                       target_pid_to_str (ptid).c_str (),
                       objfile_name (objfile), ex.what ());
              break;
            default:
              throw;
            }
        }
    }
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}

const char *
target_xfer_status_to_string (enum target_xfer_status status)
{
#define CASE(X) case X: return #X
  switch (status)
    {
      CASE(TARGET_XFER_E_IO);
      CASE(TARGET_XFER_UNAVAILABLE);
    default:
      return "<unknown>";
    }
#undef CASE
}

const std::vector<target_section> *
target_get_section_table (struct target_ops *target)
{
  return target->get_section_table ();
}

/* Find a section containing ADDR.  */

const struct target_section *
target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
{
  const std::vector<target_section> *table = target_get_section_table (target);

  if (table == NULL)
    return NULL;

  for (const target_section &secp : *table)
    {
      if (addr >= secp.addr && addr < secp.endaddr)
        return &secp;
    }
  return NULL;
}

const std::vector<target_section> *
default_get_section_table ()
{
  return &current_program_space->target_sections ();
}

/* Helper for the memory xfer routines.  Checks the attributes of the
   memory region of MEMADDR against the read or write being attempted.
   If the access is permitted returns true, otherwise returns false.
   REGION_P is an optional output parameter.  If not-NULL, it is
   filled with a pointer to the memory region of MEMADDR.  REG_LEN
   returns LEN trimmed to the end of the region.  This is how much the
   caller can continue requesting, if the access is permitted.  A
   single xfer request must not straddle memory region boundaries.  */

static bool
memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
                          ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
                          struct mem_region **region_p)
{
  struct mem_region *region;

  region = lookup_mem_region (memaddr);

  if (region_p != NULL)
    *region_p = region;

  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
        return false;
      break;

    case MEM_WO:
      if (readbuf != NULL)
        return false;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
        error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return false;
    }

  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    *reg_len = len;
  else
    *reg_len = region->hi - memaddr;

  return true;
}

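/* For example (an illustrative sketch only), a caller probing whether a
   4-byte read at ADDR is allowed by the memory-region attributes could do:

     ULONGEST reg_len;
     struct mem_region *region;

     if (!memory_xfer_check_region (readbuf, nullptr, addr, 4, &reg_len,
                                    &region))
       return TARGET_XFER_E_IO;

   after which REG_LEN holds how much of the request stays inside the
   region containing ADDR.  */
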
/* Read memory from more than one valid target.  A core file, for
   instance, could have some of memory but delegate other bits to
   the target below it.  So, we must manually try all targets.  */

enum target_xfer_status
raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
                         const gdb_byte *writebuf, ULONGEST memaddr,
                         LONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;

  do
    {
      res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
                               readbuf, writebuf, memaddr, len,
                               xfered_len);
      if (res == TARGET_XFER_OK)
        break;

      /* Stop if the target reports that the memory is not available.  */
      if (res == TARGET_XFER_UNAVAILABLE)
        break;

      /* Don't continue past targets which have all the memory.
         At one time, this code was necessary to read data from
         executables / shared libraries when data for the requested
         addresses weren't available in the core file.  But now the
         core target handles this case itself.  */
      if (ops->has_all_memory ())
        break;

      ops = ops->beneath ();
    }
  while (ops != NULL);

  /* The cache works at the raw memory level.  Make sure the cache
     gets updated with raw contents no matter what kind of memory
     object was originally being written.  Note we do write-through
     first, so that if it fails, we don't write to the cache contents
     that never made it to the target.  */
  if (writebuf != NULL
      && inferior_ptid != null_ptid
      && target_dcache_init_p (current_program_space->aspace)
      && (stack_cache_enabled_p () || code_cache_enabled_p ()))
    {
      DCACHE *dcache = target_dcache_get (current_program_space->aspace);

      /* Note that writing to an area of memory which wasn't present
         in the cache doesn't cause it to be loaded in.  */
      dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
    }

  return res;
}

/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
                       gdb_byte *readbuf, const gdb_byte *writebuf,
                       ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  ULONGEST reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
        {
          const std::vector<target_section> *table
            = target_get_section_table (ops);
          const char *section_name = section->the_bfd_section->name;

          memaddr = overlay_mapped_address (memaddr, section);

          auto match_cb = [=] (const struct target_section *s)
            {
              return (strcmp (section_name, s->the_bfd_section->name) == 0);
            };

          return section_table_xfer_memory_partial (readbuf, writebuf,
                                                    memaddr, len, xfered_len,
                                                    *table, match_cb);
        }
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      const struct target_section *secp
        = target_section_by_addr (ops, memaddr);
      if (secp != NULL
          && (bfd_section_flags (secp->the_bfd_section) & SEC_READONLY))
        {
          const std::vector<target_section> *table
            = target_get_section_table (ops);
          return section_table_xfer_memory_partial (readbuf, writebuf,
                                                    memaddr, len, xfered_len,
                                                    *table);
        }
    }

  /* Try GDB's internal data cache.  */

  if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
                                 &region))
    return TARGET_XFER_E_IO;

  if (inferior_ptid != null_ptid)
    inf = current_inferior ();
  else
    inf = NULL;

  if (inf != NULL
      && readbuf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
         with reading from a trace buffer, because reading outside of
         the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
          || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
          || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache
        = target_dcache_get_or_init (current_program_space->aspace);

      return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
                                         reg_len, xfered_len);
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
                                 xfered_len);

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}

/* Perform a partial memory transfer.  For docs see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_partial (struct target_ops *ops, enum target_object object,
                     gdb_byte *readbuf, const gdb_byte *writebuf,
                     ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;

  /* Zero length requests are ok and require no work.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  memaddr
    = gdbarch_remove_non_address_bits_memory (current_inferior ()->arch (),
                                              memaddr);

  /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
     breakpoint insns, thus hiding out from higher layers whether
     there are software breakpoints inserted in the code stream.  */
  if (readbuf != NULL)
    {
      res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
                                   xfered_len);

      if (res == TARGET_XFER_OK && !show_memory_breakpoints)
        breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
    }
  else
    {
      /* A large write request is likely to be partially satisfied
         by memory_xfer_partial_1.  We will continually malloc
         and free a copy of the entire write request for breakpoint
         shadow handling even though we only end up writing a small
         subset of it.  Cap writes to a limit specified by the target
         to mitigate this.  */
      len = std::min (ops->get_memory_xfer_limit (), len);

      gdb::byte_vector buf (writebuf, writebuf + len);
      breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
      res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr,
                                   len, xfered_len);
    }

  return res;
}

scoped_restore_tmpl<int>
make_scoped_restore_show_memory_breakpoints (int show)
{
  return make_scoped_restore (&show_memory_breakpoints, show);
}

/* For docs see target.h, to_xfer_partial.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
                     enum target_object object, const char *annex,
                     gdb_byte *readbuf, const gdb_byte *writebuf,
                     ULONGEST offset, ULONGEST len,
                     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
           core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
                                  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Skip/avoid accessing the target if the memory region
         attributes block the access.  Check this here instead of in
         raw_memory_xfer_partial as otherwise we'd end up checking
         this twice in the case of the memory_xfer_partial path is
         taken; once before checking the dcache, and another in the
         tail call to raw_memory_xfer_partial.  */
      if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
                                     NULL))
        return TARGET_XFER_E_IO;

      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
                                        xfered_len);
    }
  else
    retval = ops->xfer_partial (object, annex, readbuf,
                                writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      std::string s
        = string_printf ("%s:target_xfer_partial "
                         "(%d, %s, %s, %s, %s, %s) = %d, %s",
                         ops->shortname (), (int) object,
                         (annex ? annex : "(null)"),
                         host_address_to_string (readbuf),
                         host_address_to_string (writebuf),
                         core_addr_to_string_nz (offset), pulongest (len),
                         retval, pulongest (*xfered_len));

      if (readbuf)
        myaddr = readbuf;
      if (writebuf)
        myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
        {
          int i;

          string_appendf (s, ", bytes =");
          for (i = 0; i < *xfered_len; i++)
            {
              if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
                {
                  if (targetdebug < 2 && i > 0)
                    {
                      string_appendf (s, " ...");
                      break;
                    }

                  target_debug_printf_nofunc ("%s", s.c_str ());
                  s.clear ();
                }

              string_appendf (s, " %02x", myaddr[i] & 0xff);
            }
        }

      target_debug_printf_nofunc ("%s", s.c_str ());
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}

/* Read LEN bytes of target memory at address MEMADDR, placing the
   results in GDB's memory at MYADDR.  Returns either 0 for success or
   -1 if any error occurs.

   If an error occurs, no guarantee is made about the contents of the data at
   MYADDR.  In particular, the caller should not depend upon partial reads
   filling the buffer with good data.  There is no way for the caller to know
   how much good data might have been transferred anyway.  Callers that can
   deal with partial reads should call target_read (which will retry until
   it makes no progress, and then return how much was transferred).  */

int
target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  if (target_read (current_inferior ()->top_target (),
                   TARGET_OBJECT_MEMORY, NULL,
                   myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}

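/* Illustrative sketch (not code from this file): reading a pointer-sized
   value with the all-or-nothing wrapper above looks like

     gdb_byte buf[8];

     if (target_read_memory (addr, buf, sizeof buf) != 0)
       memory_error (TARGET_XFER_E_IO, addr);

   whereas callers that can make use of partial data should call
   target_read or read_memory_robust instead.  */
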
/* See target/target.h.  */

int
target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
{
  gdb_byte buf[4];
  int r;

  r = target_read_memory (memaddr, buf, sizeof buf);
  if (r != 0)
    return r;
  *result = extract_unsigned_integer
              (buf, sizeof buf,
               gdbarch_byte_order (current_inferior ()->arch ()));
  return 0;
}

/* Like target_read_memory, but specify explicitly that this is a read
   from the target's raw memory.  That is, this read bypasses the
   dcache, breakpoint shadowing, etc.  */

int
target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  if (target_read (current_inferior ()->top_target (),
                   TARGET_OBJECT_RAW_MEMORY, NULL,
                   myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}

/* Like target_read_memory, but specify explicitly that this is a read from
   the target's stack.  This may trigger different cache behavior.  */

int
target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  if (target_read (current_inferior ()->top_target (),
                   TARGET_OBJECT_STACK_MEMORY, NULL,
                   myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}

/* Like target_read_memory, but specify explicitly that this is a read from
   the target's code.  This may trigger different cache behavior.  */

int
target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  if (target_read (current_inferior ()->top_target (),
                   TARGET_OBJECT_CODE_MEMORY, NULL,
                   myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}

/* Write LEN bytes from MYADDR to target memory at address MEMADDR.
   Returns either 0 for success or -1 if any error occurs.  If an
   error occurs, no guarantee is made about how much data got written.
   Callers that can deal with partial writes should call
   target_write.  */

int
target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
{
  if (target_write (current_inferior ()->top_target (),
                    TARGET_OBJECT_MEMORY, NULL,
                    myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}

/* Write LEN bytes from MYADDR to target raw memory at address
   MEMADDR.  Returns either 0 for success or -1 if any error occurs.
   If an error occurs, no guarantee is made about how much data got
   written.  Callers that can deal with partial writes should call
   target_write.  */

int
target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr,
                         ssize_t len)
{
  if (target_write (current_inferior ()->top_target (),
                    TARGET_OBJECT_RAW_MEMORY, NULL,
                    myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}

/* Fetch the target's memory map.  */

std::vector<mem_region>
target_memory_map (void)
{
  target_ops *target = current_inferior ()->top_target ();
  std::vector<mem_region> result = target->memory_map ();
  if (result.empty ())
    return result;

  std::sort (result.begin (), result.end ());

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  mem_region *last_one = NULL;
  for (size_t ix = 0; ix < result.size (); ix++)
    {
      mem_region *this_one = &result[ix];
      this_one->number = ix;

      if (last_one != NULL && last_one->hi > this_one->lo)
        {
          warning (_("Overlapping regions in memory map: ignoring"));
          return std::vector<mem_region> ();
        }

      last_one = this_one;
    }

  return result;
}

void
target_flash_erase (ULONGEST address, LONGEST length)
{
  current_inferior ()->top_target ()->flash_erase (address, length);
}

void
target_flash_done (void)
{
  current_inferior ()->top_target ()->flash_done ();
}

static void
show_trust_readonly (struct ui_file *file, int from_tty,
                     struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
              _("Mode for reading from readonly sections is %s.\n"),
              value);
}

/* Target vector read/write partial wrapper functions.  */

static enum target_xfer_status
target_read_partial (struct target_ops *ops,
                     enum target_object object,
                     const char *annex, gdb_byte *buf,
                     ULONGEST offset, ULONGEST len,
                     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
                              xfered_len);
}

static enum target_xfer_status
target_write_partial (struct target_ops *ops,
                      enum target_object object,
                      const char *annex, const gdb_byte *buf,
                      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
                              xfered_len);
}

/* Wrappers to perform the full transfer.  */

/* For docs on target_read see target.h.  */

LONGEST
target_read (struct target_ops *ops,
             enum target_object object,
             const char *annex, gdb_byte *buf,
             ULONGEST offset, LONGEST len)
{
  LONGEST xfered_total = 0;
  int unit_size = 1;

  /* If we are reading from a memory object, find the length of an addressable
     unit for that architecture.  */
  if (object == TARGET_OBJECT_MEMORY
      || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY
      || object == TARGET_OBJECT_RAW_MEMORY)
    unit_size = gdbarch_addressable_memory_unit_size
                  (current_inferior ()->arch ());

  while (xfered_total < len)
    {
      ULONGEST xfered_partial;
      enum target_xfer_status status;

      status = target_read_partial (ops, object, annex,
                                    buf + xfered_total * unit_size,
                                    offset + xfered_total, len - xfered_total,
                                    &xfered_partial);

      /* Call an observer, notifying them of the xfer progress?  */
      if (status == TARGET_XFER_EOF)
        return xfered_total;
      else if (status == TARGET_XFER_OK)
        {
          xfered_total += xfered_partial;
          QUIT;
        }
      else
        return TARGET_XFER_E_IO;
    }
  return len;
}

/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning nor the end -- nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  Explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
                           const ULONGEST begin, const ULONGEST end,
                           int unit_size,
                           std::vector<memory_read_result> *result)
{
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    return;

  gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                           buf.get (), begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                                buf.get () + (end - begin) - 1, end - 1, 1,
                                &xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    return;

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin) / 2;

      if (forward)
        {
          first_half_begin = current_begin;
          first_half_end = middle;
          second_half_begin = middle;
          second_half_end = current_end;
        }
      else
        {
          first_half_begin = middle;
          first_half_end = current_end;
          second_half_begin = current_begin;
          second_half_end = middle;
        }

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                          buf.get () + (first_half_begin - begin) * unit_size,
                          first_half_begin,
                          first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
        {
          /* This half reads up fine.  So, the error must be in the
             other half.  */
          current_begin = second_half_begin;
          current_end = second_half_end;
        }
      else
        {
          /* This half is not readable.  Because we've tried one byte, we
             know some part of this half is actually readable.  Go to the next
             iteration to divide again and try to read.

             We don't handle the other half, because this function only tries
             to read a single readable subrange.  */
          current_begin = first_half_begin;
          current_end = first_half_end;
        }
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  */
      result->emplace_back (begin, current_end, std::move (buf));
    }
  else
    {
      /* The [current_end, end) range has been read.  */
      LONGEST region_len = end - current_end;

      gdb::unique_xmalloc_ptr<gdb_byte> data
        ((gdb_byte *) xmalloc (region_len * unit_size));
      memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
              region_len * unit_size);
      result->emplace_back (current_end, end, std::move (data));
    }
}

<memory_read_result
>
2119 read_memory_robust (struct target_ops
*ops
,
2120 const ULONGEST offset
, const LONGEST len
)
2122 std::vector
<memory_read_result
> result
;
2124 = gdbarch_addressable_memory_unit_size (current_inferior ()->arch ());
2126 LONGEST xfered_total
= 0;
2127 while (xfered_total
< len
)
2129 struct mem_region
*region
= lookup_mem_region (offset
+ xfered_total
);
2132 /* If there is no explicit region, a fake one should be created. */
2133 gdb_assert (region
);
2135 if (region
->hi
== 0)
2136 region_len
= len
- xfered_total
;
2138 region_len
= region
->hi
- offset
;
2140 if (region
->attrib
.mode
== MEM_NONE
|| region
->attrib
.mode
== MEM_WO
)
2142 /* Cannot read this region. Note that we can end up here only
2143 if the region is explicitly marked inaccessible, or
2144 'inaccessible-by-default' is in effect. */
2145 xfered_total
+= region_len
;
2149 LONGEST to_read
= std::min (len
- xfered_total
, region_len
);
2150 gdb::unique_xmalloc_ptr
<gdb_byte
> buffer
2151 ((gdb_byte
*) xmalloc (to_read
* unit_size
));
2153 LONGEST xfered_partial
=
2154 target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
, buffer
.get (),
2155 offset
+ xfered_total
, to_read
);
2156 /* Call an observer, notifying them of the xfer progress? */
2157 if (xfered_partial
<= 0)
2159 /* Got an error reading full chunk. See if maybe we can read
2161 read_whatever_is_readable (ops
, offset
+ xfered_total
,
2162 offset
+ xfered_total
+ to_read
,
2163 unit_size
, &result
);
2164 xfered_total
+= to_read
;
2168 result
.emplace_back (offset
+ xfered_total
,
2169 offset
+ xfered_total
+ xfered_partial
,
2170 std::move (buffer
));
2171 xfered_total
+= xfered_partial
;
/* An alternative to target_write with progress callbacks.  */

LONGEST
target_write_with_progress (struct target_ops *ops,
                            enum target_object object,
                            const char *annex, const gdb_byte *buf,
                            ULONGEST offset, LONGEST len,
                            void (*progress) (ULONGEST, void *), void *baton)
{
  LONGEST xfered_total = 0;
  int unit_size = 1;

  /* If we are writing to a memory object, find the length of an addressable
     unit for that architecture.  */
  if (object == TARGET_OBJECT_MEMORY
      || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY
      || object == TARGET_OBJECT_RAW_MEMORY)
    unit_size = gdbarch_addressable_memory_unit_size
                  (current_inferior ()->arch ());

  /* Give the progress callback a chance to set up.  */
  if (progress)
    (*progress) (0, baton);

  while (xfered_total < len)
    {
      ULONGEST xfered_partial;
      enum target_xfer_status status;

      status = target_write_partial (ops, object, annex,
                                     buf + xfered_total * unit_size,
                                     offset + xfered_total, len - xfered_total,
                                     &xfered_partial);

      if (status != TARGET_XFER_OK)
        return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;

      if (progress)
        (*progress) (xfered_partial, baton);

      xfered_total += xfered_partial;
    }
  return len;
}

/* For docs on target_write see target.h.  */

LONGEST
target_write (struct target_ops *ops,
              enum target_object object,
              const char *annex, const gdb_byte *buf,
              ULONGEST offset, LONGEST len)
{
  return target_write_with_progress (ops, object, annex, buf, offset, len,
                                     NULL, NULL);
}

/* Help for target_read_alloc and target_read_stralloc.  See their comments
   for details.  */

template <typename T>
std::optional<gdb::def_vector<T>>
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
                     const char *annex)
{
  gdb::def_vector<T> buf;
  size_t buf_pos = 0;
  const int chunk = 4096;

  /* This function does not have a length parameter; it reads the
     entire OBJECT.  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      buf.resize (buf_pos + chunk);

      status = target_read_partial (ops, object, annex,
                                    (gdb_byte *) &buf[buf_pos],
                                    buf_pos, chunk,
                                    &xfered_len);

      if (status == TARGET_XFER_EOF)
        {
          /* Read all there was.  */
          buf.resize (buf_pos);
          return buf;
        }
      else if (status != TARGET_XFER_OK)
        {
          /* An error occurred.  */
          return {};
        }

      buf_pos += xfered_len;
    }
}

std::optional<gdb::byte_vector>
target_read_alloc (struct target_ops *ops, enum target_object object,
                   const char *annex)
{
  return target_read_alloc_1<gdb_byte> (ops, object, annex);
}

std::optional<gdb::char_vector>
target_read_stralloc (struct target_ops *ops, enum target_object object,
                      const char *annex)
{
  std::optional<gdb::char_vector> buf
    = target_read_alloc_1<char> (ops, object, annex);

  if (!buf)
    return {};

  if (buf->empty () || buf->back () != '\0')
    buf->push_back ('\0');

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (auto it = std::find (buf->begin (), buf->end (), '\0');
       it != buf->end (); it++)
    if (*it != '\0')
      {
        warning (_("target object %d, annex %s, "
                   "contained unexpected null characters"),
                 (int) object, annex ? annex : "(none)");
        break;
      }

  return buf;
}

/* Memory transfer methods.  */

static void
get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
                   LONGEST len)
{
  /* This method is used to read from an alternate, non-current
     target.  This read must bypass the overlay support (as symbols
     don't match this target), and GDB's internal cache (wrong cache
     for this target).  */
  if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
      != len)
    memory_error (TARGET_XFER_E_IO, addr);
}

ULONGEST
get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
                            int len, enum bfd_endian byte_order)
{
  gdb_byte buf[sizeof (ULONGEST)];

  gdb_assert (len <= sizeof (buf));
  get_target_memory (ops, addr, buf, len);
  return extract_unsigned_integer (buf, len, byte_order);
}
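/* For example (sketch only), a caller holding a non-current target
   could fetch a 4-byte big-endian word at ADDR with

     ULONGEST word
       = get_target_memory_unsigned (ops, addr, 4, BFD_ENDIAN_BIG);

   A failed read is reported through memory_error and does not
   return.  */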
/* See target.h.  */

int
target_insert_breakpoint (struct gdbarch *gdbarch,
                          struct bp_target_info *bp_tgt)
{
  if (!may_insert_breakpoints)
    {
      warning (_("May not insert breakpoints"));
      return 1;
    }

  target_ops *target = current_inferior ()->top_target ();

  return target->insert_breakpoint (gdbarch, bp_tgt);
}

/* See target.h.  */

int
target_remove_breakpoint (struct gdbarch *gdbarch,
                          struct bp_target_info *bp_tgt,
                          enum remove_bp_reason reason)
{
  /* This is kind of a weird case to handle, but the permission might
     have been changed after breakpoints were inserted - in which case
     we should just take the user literally and assume that any
     breakpoints should be left in place.  */
  if (!may_insert_breakpoints)
    {
      warning (_("May not remove breakpoints"));
      return 1;
    }

  target_ops *target = current_inferior ()->top_target ();

  return target->remove_breakpoint (gdbarch, bp_tgt, reason);
}
static void
info_target_command (const char *args, int from_tty)
{
  int has_all_mem = 0;

  if (current_program_space->symfile_object_file != NULL)
    {
      objfile *objf = current_program_space->symfile_object_file;
      gdb_printf (_("Symbols from \"%ps\".\n"),
                  styled_string (file_name_style.style (),
                                 objfile_name (objf)));
    }

  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    {
      if (!t->has_memory ())
        continue;

      if ((int) (t->stratum ()) <= (int) dummy_stratum)
        continue;
      if (has_all_mem)
        gdb_printf (_("\tWhile running this, "
                      "GDB does not access memory from...\n"));
      gdb_printf ("%s:\n", t->longname ());
      t->files_info ();
      has_all_mem = t->has_all_memory ();
    }
}
/* This function is called before any new inferior is created, e.g.
   by running a program, attaching, or connecting to a target.
   It cleans up any state from previous invocations which might
   change between runs.  This is a subset of what target_preopen
   resets (things which might change between targets).  */

void
target_pre_inferior ()
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.
     The symptom of the stale state is that, after detaching and
     attaching to a different process, memory reads fail with:

     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  */
  if (!gdbarch_has_global_solist (current_inferior ()->arch ()))
    {
      no_shared_libraries (current_program_space);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* attach_flag may be set if the previous process associated with
     the inferior was attached to.  */
  current_inferior ()->attach_flag = false;

  current_inferior ()->highest_thread_num = 0;

  update_previous_thread ();

  agent_capability_invalidate ();
}
/* This is to be called by the open routine before it does
   anything.  */

void
target_preopen (int from_tty)
{
  dont_repeat ();

  if (current_inferior ()->pid != 0)
    {
      if (!from_tty
          || !target_has_execution ()
          || query (_("A program is being debugged already.  Kill it? ")))
        {
          /* Core inferiors actually should be detached, not
             killed.  */
          if (target_has_execution ())
            target_kill ();
          else
            target_detach (current_inferior (), 0);
        }
      else
        error (_("Program not killed."));
    }

  /* Release reference to old previous thread.  */
  update_previous_thread ();

  /* Calling target_kill may remove the target from the stack.  But if
     it doesn't (which seems like a win for UDI), remove it now.  */
  /* Leave the exec target, though.  The user may be switching from a
     live process to a core of the same program.  */
  current_inferior ()->pop_all_targets_above (file_stratum);

  target_pre_inferior ();
}
/* See target.h.  */

void
target_detach (inferior *inf, int from_tty)
{
  /* Threads don't need to be resumed until the end of this function.  */
  scoped_disable_commit_resumed disable_commit_resumed ("detaching");

  /* After we have detached, we will clear the register cache for this inferior
     by calling registers_changed_ptid.  We must save the pid_ptid before
     detaching, as the target detach method will clear inf->pid.  */
  ptid_t save_pid_ptid = ptid_t (inf->pid);

  /* As long as some to_detach implementations rely on the current_inferior
     (either directly, or indirectly, like through reading memory), INF needs
     to be the current inferior.  When that requirement will become no longer
     true, then we can remove this assertion.  */
  gdb_assert (inf == current_inferior ());

  prepare_for_detach ();

  gdb::observers::inferior_pre_detach.notify (inf);

  /* Hold a strong reference because detaching may unpush the
     target.  */
  auto proc_target_ref = target_ops_ref::new_reference (inf->process_target ());

  current_inferior ()->top_target ()->detach (inf, from_tty);

  process_stratum_target *proc_target
    = as_process_stratum_target (proc_target_ref.get ());

  registers_changed_ptid (proc_target, save_pid_ptid);

  /* We have to ensure we have no frame cache left.  Normally,
     registers_changed_ptid (save_pid_ptid) calls reinit_frame_cache when
     inferior_ptid matches save_pid_ptid, but in our case, it does not
     call it, as inferior_ptid has been reset.  */
  reinit_frame_cache ();

  disable_commit_resumed.reset_and_commit ();
}
void
target_disconnect (const char *args, int from_tty)
{
  /* If we're in breakpoints-always-inserted mode or if breakpoints
     are global across processes, we have to remove them before
     disconnecting.  */
  remove_breakpoints ();

  current_inferior ()->top_target ()->disconnect (args, from_tty);
}
/* See target/target.h.  */

ptid_t
target_wait (ptid_t ptid, struct target_waitstatus *status,
             target_wait_flags options)
{
  target_ops *target = current_inferior ()->top_target ();
  process_stratum_target *proc_target = current_inferior ()->process_target ();

  gdb_assert (!proc_target->commit_resumed_state);

  if (!target_can_async_p (target))
    gdb_assert ((options & TARGET_WNOHANG) == 0);

  ptid_t event_ptid = null_ptid;
  SCOPE_EXIT { gdb::observers::target_post_wait.notify (event_ptid); };
  gdb::observers::target_pre_wait.notify (ptid);
  event_ptid = target->wait (ptid, status, options);

  return event_ptid;
}
ptid_t
default_target_wait (struct target_ops *ops,
                     ptid_t ptid, struct target_waitstatus *status,
                     target_wait_flags options)
{
  status->set_ignore ();
  return minus_one_ptid;
}
std::string
target_pid_to_str (ptid_t ptid)
{
  return current_inferior ()->top_target ()->pid_to_str (ptid);
}

const char *
target_thread_name (struct thread_info *info)
{
  gdb_assert (info->inf == current_inferior ());

  return current_inferior ()->top_target ()->thread_name (info);
}

struct thread_info *
target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
                                     int handle_len,
                                     struct inferior *inf)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->thread_handle_to_thread_info (thread_handle, handle_len, inf);
}

/* See target.h.  */

gdb::array_view<const gdb_byte>
target_thread_info_to_thread_handle (struct thread_info *tip)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->thread_info_to_thread_handle (tip);
}
void
target_resume (ptid_t scope_ptid, int step, enum gdb_signal signal)
{
  process_stratum_target *curr_target = current_inferior ()->process_target ();
  gdb_assert (!curr_target->commit_resumed_state);

  gdb_assert (inferior_ptid != null_ptid);
  gdb_assert (inferior_ptid.matches (scope_ptid));

  target_dcache_invalidate (current_program_space->aspace);

  current_inferior ()->top_target ()->resume (scope_ptid, step, signal);

  registers_changed_ptid (curr_target, scope_ptid);
  /* We only set the internal executing state here.  The user/frontend
     running state is set at a higher level.  This also clears the
     thread's stop_pc as side effect.  */
  set_executing (curr_target, scope_ptid, true);
  clear_inline_frame_state (curr_target, scope_ptid);

  if (target_can_async_p ())
    target_async (true);
}

/* See target.h.  */

void
target_commit_resumed ()
{
  gdb_assert (current_inferior ()->process_target ()->commit_resumed_state);
  current_inferior ()->top_target ()->commit_resumed ();
}
/* See target.h.  */

bool
target_has_pending_events ()
{
  return current_inferior ()->top_target ()->has_pending_events ();
}

void
target_pass_signals (gdb::array_view<const unsigned char> pass_signals)
{
  current_inferior ()->top_target ()->pass_signals (pass_signals);
}

void
target_program_signals (gdb::array_view<const unsigned char> program_signals)
{
  current_inferior ()->top_target ()->program_signals (program_signals);
}
static void
default_follow_fork (struct target_ops *self, inferior *child_inf,
                     ptid_t child_ptid, target_waitkind fork_kind,
                     bool follow_child, bool detach_fork)
{
  /* Some target returned a fork event, but did not know how to follow it.  */
  internal_error (_("could not find a target to follow fork"));
}

static void
default_follow_clone (struct target_ops *self, ptid_t child_ptid)
{
  /* Some target returned a clone event, but did not know how to follow it.  */
  internal_error (_("could not find a target to follow clone"));
}

/* See target.h.  */

void
target_follow_fork (inferior *child_inf, ptid_t child_ptid,
                    target_waitkind fork_kind, bool follow_child,
                    bool detach_fork)
{
  target_ops *target = current_inferior ()->top_target ();

  /* Check consistency between CHILD_INF, CHILD_PTID, FOLLOW_CHILD and
     DETACH_FORK.  */
  if (child_inf != nullptr)
    {
      gdb_assert (follow_child || !detach_fork);
      gdb_assert (child_inf->pid == child_ptid.pid ());
    }
  else
    gdb_assert (!follow_child && detach_fork);

  return target->follow_fork (child_inf, child_ptid, fork_kind, follow_child,
                              detach_fork);
}

/* See target.h.  */

void
target_follow_exec (inferior *follow_inf, ptid_t ptid,
                    const char *execd_pathname)
{
  current_inferior ()->top_target ()->follow_exec (follow_inf, ptid,
                                                   execd_pathname);
}

static void
default_mourn_inferior (struct target_ops *self)
{
  internal_error (_("could not find a target to follow mourn inferior"));
}

void
target_mourn_inferior (ptid_t ptid)
{
  gdb_assert (ptid.pid () == inferior_ptid.pid ());
  current_inferior ()->top_target ()->mourn_inferior ();
}
/* Look for a target which can describe architectural features, starting
   from TARGET.  If we find one, return its description.  */

const struct target_desc *
target_read_description (struct target_ops *target)
{
  return target->read_description ();
}
/* Default implementation of memory-searching.  */

static int
default_search_memory (struct target_ops *self,
                       CORE_ADDR start_addr, ULONGEST search_space_len,
                       const gdb_byte *pattern, ULONGEST pattern_len,
                       CORE_ADDR *found_addrp)
{
  auto read_memory = [=] (CORE_ADDR addr, gdb_byte *result, size_t len)
    {
      return target_read (current_inferior ()->top_target (),
                          TARGET_OBJECT_MEMORY, NULL,
                          result, addr, len) == len;
    };

  /* Start over from the top of the target stack.  */
  return simple_search_memory (read_memory, start_addr, search_space_len,
                               pattern, pattern_len, found_addrp);
}

/* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
   sequence of bytes in PATTERN with length PATTERN_LEN.

   The result is 1 if found, 0 if not found, and -1 if there was an error
   requiring halting of the search (e.g. memory read error).
   If the pattern is found the address is recorded in FOUND_ADDRP.  */

int
target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
                      const gdb_byte *pattern, ULONGEST pattern_len,
                      CORE_ADDR *found_addrp)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->search_memory (start_addr, search_space_len, pattern,
                                pattern_len, found_addrp);
}
/* Look through the currently pushed targets.  If none of them will
   be able to restart the currently running process, issue an error
   message.  */

void
target_require_runnable (void)
{
  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    {
      /* If this target knows how to create a new program, then
         assume we will still be able to after killing the current
         one.  Either killing and mourning will not pop T, or else
         find_default_run_target will find it again.  */
      if (t->can_create_inferior ())
        return;

      /* Do not worry about targets at certain strata that can not
         create inferiors.  Assume they will be pushed again if
         necessary, and continue to the process_stratum.  */
      if (t->stratum () > process_stratum)
        continue;

      error (_("The \"%s\" target does not support \"run\".  "
               "Try \"help target\" or \"continue\"."),
             t->shortname ());
    }

  /* This function is only called if the target is running.  In that
     case there should have been a process_stratum target and it
     should either know how to create inferiors, or not...  */
  internal_error (_("No targets found"));
}
/* Whether GDB is allowed to fall back to the default run target for
   "run", "attach", etc. when no target is connected yet.  */
static bool auto_connect_native_target = true;

static void
show_auto_connect_native_target (struct ui_file *file, int from_tty,
                                 struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
              _("Whether GDB may automatically connect to the "
                "native target is %s.\n"),
              value);
}

/* A pointer to the target that can respond to "run" or "attach".
   Native targets are always singletons and instantiated early at GDB
   startup.  */
static target_ops *the_native_target;

/* See target.h.  */

void
set_native_target (target_ops *target)
{
  if (the_native_target != NULL)
    internal_error (_("native target already set (\"%s\")."),
                    the_native_target->longname ());

  the_native_target = target;
}

/* See target.h.  */

target_ops *
get_native_target ()
{
  return the_native_target;
}
/* Look through the list of possible targets for a target that can
   execute a run or attach command without any other data.  This is
   used to locate the default process stratum.

   If DO_MESG is not NULL, the result is always valid (error() is
   called for errors); else, return NULL on error.  */

static struct target_ops *
find_default_run_target (const char *do_mesg)
{
  if (auto_connect_native_target && the_native_target != NULL)
    return the_native_target;

  if (do_mesg != NULL)
    error (_("Don't know how to %s.  Try \"help target\"."), do_mesg);
  return NULL;
}

/* See target.h.  */

struct target_ops *
find_attach_target (void)
{
  /* If a target on the current stack can attach, use it.  */
  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    if (t->can_attach ())
      return t;

  /* Otherwise, use the default run target for attaching.  */
  return find_default_run_target ("attach");
}

/* See target.h.  */

struct target_ops *
find_run_target (void)
{
  /* If a target on the current stack can run, use it.  */
  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    if (t->can_create_inferior ())
      return t;

  /* Otherwise, use the default run target.  */
  return find_default_run_target ("run");
}
bool
target_ops::info_proc (const char *args, enum info_proc_what what)
{
  return false;
}

/* Implement the "info proc" command.  */

int
target_info_proc (const char *args, enum info_proc_what what)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  t = find_target_at (process_stratum);
  if (t == NULL)
    t = find_default_run_target (NULL);

  for (; t != NULL; t = t->beneath ())
    {
      if (t->info_proc (args, what))
        {
          target_debug_printf_nofunc ("target_info_proc (\"%s\", %d)", args, what);

          return 1;
        }
    }

  return 0;
}

static int
find_default_supports_disable_randomization (struct target_ops *self)
{
  struct target_ops *t;

  t = find_default_run_target (NULL);
  if (t != NULL)
    return t->supports_disable_randomization ();
  return 0;
}

int
target_supports_disable_randomization (void)
{
  return current_inferior ()->top_target ()->supports_disable_randomization ();
}
/* See target/target.h.  */

int
target_supports_multi_process (void)
{
  return current_inferior ()->top_target ()->supports_multi_process ();
}

/* See target.h.  */

std::optional<gdb::char_vector>
target_get_osdata (const char *type)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  t = find_target_at (process_stratum);
  if (t == NULL)
    t = find_default_run_target ("get OS data");

  return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
}
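/* Usage note (illustrative): as above, target_read_stralloc is the
   convenient entry point for string-valued objects of unknown length;
   the returned vector, when present, is guaranteed NUL-terminated, so
   its data () can be handed to C string routines directly.  */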
target_ops *
target_ops::beneath () const
{
  return current_inferior ()->find_target_beneath (this);
}

void
target_ops::close ()
{
}

bool
target_ops::can_attach ()
{
  return false;
}

void
target_ops::attach (const char *, int)
{
  gdb_assert_not_reached ("target_ops::attach called");
}

bool
target_ops::can_create_inferior ()
{
  return false;
}

void
target_ops::create_inferior (const char *, const std::string &,
                             char **, int)
{
  gdb_assert_not_reached ("target_ops::create_inferior called");
}

bool
target_ops::can_run ()
{
  return false;
}

int
target_can_run ()
{
  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    {
      if (t->can_run ())
        return 1;
    }

  return 0;
}
/* Target file operations.  */

static struct target_ops *
default_fileio_target (void)
{
  struct target_ops *t;

  /* If we're already connected to something that can perform
     file I/O, use it.  Otherwise, try using the native target.  */
  t = find_target_at (process_stratum);
  if (t != NULL)
    return t;
  return find_default_run_target ("file I/O");
}

/* File handle for target file operations.  */

struct fileio_fh_t
{
  /* The target on which this file is open.  NULL if the target is
     meanwhile closed while the handle is open.  */
  target_ops *target;

  /* The file descriptor on the target.  */
  int target_fd;

  /* Check whether this fileio_fh_t represents a closed file.  */
  bool is_closed ()
  {
    return target_fd < 0;
  }
};

/* Vector of currently open file handles.  The value returned by
   target_fileio_open and passed as the FD argument to other
   target_fileio_* functions is an index into this vector.  This
   vector's entries are never freed; instead, files are marked as
   closed, and the handle becomes available for reuse.  */
static std::vector<fileio_fh_t> fileio_fhandles;

/* Index into fileio_fhandles of the lowest handle that might be
   closed.  This permits handle reuse without searching the whole
   list each time a new file is opened.  */
static int lowest_closed_fd;

/* See target.h.  */

void
fileio_handles_invalidate_target (target_ops *targ)
{
  for (fileio_fh_t &fh : fileio_fhandles)
    if (fh.target == targ)
      fh.target = NULL;
}
/* Acquire a target fileio file descriptor.  */

static int
acquire_fileio_fd (target_ops *target, int target_fd)
{
  /* Search for closed handles to reuse.  */
  for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
    {
      fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];

      if (fh.is_closed ())
        break;
    }

  /* Push a new handle if no closed handles were found.  */
  if (lowest_closed_fd == fileio_fhandles.size ())
    fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
  else
    fileio_fhandles[lowest_closed_fd] = {target, target_fd};

  /* Should no longer be marked closed.  */
  gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());

  /* Return its index, and start the next lookup at
     the next index.  */
  return lowest_closed_fd++;
}

/* Release a target fileio file descriptor.  */

static void
release_fileio_fd (int fd, fileio_fh_t *fh)
{
  fh->target_fd = -1;
  lowest_closed_fd = std::min (lowest_closed_fd, fd);
}

/* Return a pointer to the fileio_fhandle_t corresponding to FD.  */

static fileio_fh_t *
fileio_fd_to_fh (int fd)
{
  return &fileio_fhandles[fd];
}
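/* Note on the handle table above: descriptors handed back to callers
   are indices into FILEIO_FHANDLES, not target-side descriptors.  A
   closed slot keeps its place (its target_fd is set to -1) and is
   recycled by acquire_fileio_fd starting from LOWEST_CLOSED_FD, so an
   index stays stable for as long as its file remains open.  */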
/* Default implementations of file i/o methods.  We don't want these
   to delegate automatically, because we need to know which target
   supported the method, in order to call it directly from within
   pread/pwrite, etc.  */

int
target_ops::fileio_open (struct inferior *inf, const char *filename,
                         int flags, int mode, int warn_if_slow,
                         fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
                           ULONGEST offset, fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
                          ULONGEST offset, fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_fstat (int fd, struct stat *sb, fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_stat (struct inferior *inf, const char *filename,
                         struct stat *sb, fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_close (int fd, fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_unlink (struct inferior *inf, const char *filename,
                           fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

std::optional<std::string>
target_ops::fileio_readlink (struct inferior *inf, const char *filename,
                             fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return {};
}
/* See target.h.  */

int
target_fileio_open (struct inferior *inf, const char *filename,
                    int flags, int mode, bool warn_if_slow,
                    fileio_error *target_errno)
{
  for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
    {
      int fd = t->fileio_open (inf, filename, flags, mode,
                               warn_if_slow, target_errno);

      if (fd == -1 && *target_errno == FILEIO_ENOSYS)
        continue;

      if (fd < 0)
        fd = -1;
      else
        fd = acquire_fileio_fd (t, fd);

      target_debug_printf_nofunc ("target_fileio_open (%d,%s,0x%x,0%o,%d) = %d (%d)",
                                  inf == NULL ? 0 : inf->num, filename, flags, mode,
                                  warn_if_slow, fd, fd != -1 ? 0 : *target_errno);
      return fd;
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
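/* Illustrative call sequence (sketch only; error handling elided, and
   "/target/path" is a placeholder path, interpreted in INF's
   filesystem or the debugger's own when INF is NULL):

     fileio_error err;
     int fd = target_fileio_open (inf, "/target/path", FILEIO_O_RDONLY,
                                  0, false, &err);
     if (fd != -1)
       {
         gdb_byte block[512];
         int n = target_fileio_pread (fd, block, sizeof block, 0, &err);
         target_fileio_close (fd, &err);
       }
*/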
/* See target.h.  */

int
target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
                      ULONGEST offset, fileio_error *target_errno)
{
  fileio_fh_t *fh = fileio_fd_to_fh (fd);
  int ret = -1;

  if (fh->is_closed ())
    *target_errno = FILEIO_EBADF;
  else if (fh->target == NULL)
    *target_errno = FILEIO_EIO;
  else
    ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
                                     len, offset, target_errno);

  target_debug_printf_nofunc ("target_fileio_pwrite (%d,...,%d,%s) = %d (%d)", fd,
                              len, pulongest (offset), ret,
                              ret != -1 ? 0 : *target_errno);
  return ret;
}
/* See target.h.  */

int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
                     ULONGEST offset, fileio_error *target_errno)
{
  fileio_fh_t *fh = fileio_fd_to_fh (fd);
  int ret = -1;

  if (fh->is_closed ())
    *target_errno = FILEIO_EBADF;
  else if (fh->target == NULL)
    *target_errno = FILEIO_EIO;
  else
    ret = fh->target->fileio_pread (fh->target_fd, read_buf,
                                    len, offset, target_errno);

  target_debug_printf_nofunc ("target_fileio_pread (%d,...,%d,%s) = %d (%d)", fd, len,
                              pulongest (offset), ret, ret != -1 ? 0 : *target_errno);
  return ret;
}
/* See target.h.  */

int
target_fileio_fstat (int fd, struct stat *sb, fileio_error *target_errno)
{
  fileio_fh_t *fh = fileio_fd_to_fh (fd);
  int ret = -1;

  if (fh->is_closed ())
    *target_errno = FILEIO_EBADF;
  else if (fh->target == NULL)
    *target_errno = FILEIO_EIO;
  else
    ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);

  target_debug_printf_nofunc ("target_fileio_fstat (%d) = %d (%d)", fd, ret,
                              ret != -1 ? 0 : *target_errno);
  return ret;
}
/* See target.h.  */

int
target_fileio_stat (struct inferior *inf, const char *filename,
                    struct stat *sb, fileio_error *target_errno)
{
  for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
    {
      int ret = t->fileio_stat (inf, filename, sb, target_errno);

      if (ret == -1 && *target_errno == FILEIO_ENOSYS)
        continue;

      target_debug_printf_nofunc ("target_fileio_stat (%s) = %d (%d)",
                                  filename, ret,
                                  ret != -1 ? 0 : *target_errno);
      return ret;
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
/* See target.h.  */

int
target_fileio_close (int fd, fileio_error *target_errno)
{
  fileio_fh_t *fh = fileio_fd_to_fh (fd);
  int ret = -1;

  if (fh->is_closed ())
    *target_errno = FILEIO_EBADF;
  else
    {
      if (fh->target != NULL)
        ret = fh->target->fileio_close (fh->target_fd,
                                        target_errno);
      else
        ret = 0;
      release_fileio_fd (fd, fh);
    }

  target_debug_printf_nofunc ("target_fileio_close (%d) = %d (%d)", fd, ret,
                              ret != -1 ? 0 : *target_errno);
  return ret;
}
/* See target.h.  */

int
target_fileio_unlink (struct inferior *inf, const char *filename,
                      fileio_error *target_errno)
{
  for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
    {
      int ret = t->fileio_unlink (inf, filename, target_errno);

      if (ret == -1 && *target_errno == FILEIO_ENOSYS)
        continue;

      target_debug_printf_nofunc ("target_fileio_unlink (%d,%s) = %d (%d)",
                                  inf == NULL ? 0 : inf->num, filename, ret,
                                  ret != -1 ? 0 : *target_errno);
      return ret;
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
/* See target.h.  */

std::optional<std::string>
target_fileio_readlink (struct inferior *inf, const char *filename,
                        fileio_error *target_errno)
{
  for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
    {
      std::optional<std::string> ret
        = t->fileio_readlink (inf, filename, target_errno);

      if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
        continue;

      target_debug_printf_nofunc ("target_fileio_readlink (%d,%s) = %s (%d)",
                                  inf == NULL ? 0 : inf->num, filename,
                                  ret ? ret->c_str () : "(nil)",
                                  ret ? 0 : *target_errno);
      return ret;
    }

  *target_errno = FILEIO_ENOSYS;
  return {};
}
/* Like scoped_fd, but specific to target fileio.  */

class scoped_target_fd
{
public:
  explicit scoped_target_fd (int fd) noexcept
    : m_fd (fd)
  {
  }

  ~scoped_target_fd ()
  {
    if (m_fd >= 0)
      {
        fileio_error target_errno;

        target_fileio_close (m_fd, &target_errno);
      }
  }

  DISABLE_COPY_AND_ASSIGN (scoped_target_fd);

  int get () const noexcept
  {
    return m_fd;
  }

private:
  int m_fd;
};
/* Read target file FILENAME, in the filesystem as seen by INF.  If
   INF is NULL, use the filesystem seen by the debugger (GDB or, for
   remote targets, the remote stub).  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes
   are available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for
   more information.  */

static LONGEST
target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
                            gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  fileio_error target_errno;

  scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
                                           0700, false, &target_errno));
  if (fd.get () == -1)
    return -1;

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = (gdb_byte *) xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      n = target_fileio_pread (fd.get (), &buf[buf_pos],
                               buf_alloc - buf_pos - padding, buf_pos,
                               &target_errno);
      if (n < 0)
        {
          /* An error occurred.  */
          xfree (buf);
          return -1;
        }
      else if (n == 0)
        {
          /* Read all there was.  */
          if (buf_pos == 0)
            xfree (buf);
          else
            *buf_p = buf;
          return buf_pos;
        }

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
        {
          buf_alloc *= 2;
          buf = (gdb_byte *) xrealloc (buf, buf_alloc);
        }

      QUIT;
    }
}

/* See target.h.  */

LONGEST
target_fileio_read_alloc (struct inferior *inf, const char *filename,
                          gdb_byte **buf_p)
{
  return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
}
/* See target.h.  */

gdb::unique_xmalloc_ptr<char>
target_fileio_read_stralloc (struct inferior *inf, const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return gdb::unique_xmalloc_ptr<char> (nullptr);

  if (transferred == 0)
    return make_unique_xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
        warning (_("target file %s "
                   "contained unexpected null characters"),
                 filename);
        break;
      }

  return gdb::unique_xmalloc_ptr<char> (bufstr);
}
static int
default_region_ok_for_hw_watchpoint (struct target_ops *self,
                                     CORE_ADDR addr, int len)
{
  gdbarch *arch = current_inferior ()->arch ();
  return (len <= gdbarch_ptr_bit (arch) / TARGET_CHAR_BIT);
}

static int
default_watchpoint_addr_within_range (struct target_ops *target,
                                      CORE_ADDR addr,
                                      CORE_ADDR start, int length)
{
  return addr >= start && addr < start + length;
}
/* See target.h.  */

target_ops *
target_stack::find_beneath (const target_ops *t) const
{
  /* Look for a non-empty slot at stratum levels beneath T's.  */
  for (int stratum = t->stratum () - 1; stratum >= 0; --stratum)
    if (m_stack[stratum].get () != NULL)
      return m_stack[stratum].get ();

  return NULL;
}

/* See target.h.  */

struct target_ops *
find_target_at (enum strata stratum)
{
  return current_inferior ()->target_at (stratum);
}
/* See target.h.  */

void
target_announce_detach (int from_tty)
{
  int pid;
  const char *exec_file;

  if (!from_tty)
    return;

  pid = inferior_ptid.pid ();
  exec_file = current_program_space->exec_filename ();
  if (exec_file == nullptr)
    gdb_printf ("Detaching from pid %s\n",
                target_pid_to_str (ptid_t (pid)).c_str ());
  else
    gdb_printf (_("Detaching from program: %ps, %s\n"),
                styled_string (file_name_style.style (), exec_file),
                target_pid_to_str (ptid_t (pid)).c_str ());
}

/* See target.h.  */

void
target_announce_attach (int from_tty, int pid)
{
  if (!from_tty)
    return;

  const char *exec_file = current_program_space->exec_filename ();

  if (exec_file != nullptr)
    gdb_printf ("Attaching to program: %ps, %s\n",
                styled_string (file_name_style.style (), exec_file),
                target_pid_to_str (ptid_t (pid)).c_str ());
  else
    gdb_printf ("Attaching to %s\n",
                target_pid_to_str (ptid_t (pid)).c_str ());
}
/* The inferior process has died.  Long live the inferior!  */

void
generic_mourn_inferior (void)
{
  inferior *inf = current_inferior ();

  switch_to_no_thread ();

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out (inf->pspace);

  if (inf->pid != 0)
    exit_inferior (inf);

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf, inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
/* Convert a normal process ID to a string.  Returns the string in a
   std::string.  */

std::string
normal_pid_to_str (ptid_t ptid)
{
  return string_printf ("process %d", ptid.pid ());
}

static std::string
default_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}

/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (struct target_ops *self,
                           find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
}

/* Error-catcher for target_make_corefile_notes.  */
static gdb::unique_xmalloc_ptr<char>
dummy_make_corefile_notes (struct target_ops *self,
                           bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
}
#include "target-delegates-gen.c"

/* The initial current target, so that there is always a semi-valid
   current target.  */

static dummy_target the_dummy_target;

/* See target.h.  */

target_ops *
get_dummy_target ()
{
  return &the_dummy_target;
}

static const target_info dummy_target_info = {
  "None",
  N_("None"),
  ""
};

strata
dummy_target::stratum () const
{
  return dummy_stratum;
}

strata
debug_target::stratum () const
{
  return debug_stratum;
}

const target_info &
dummy_target::info () const
{
  return dummy_target_info;
}

const target_info &
debug_target::info () const
{
  return beneath ()->info ();
}
/* See target.h.  */

bool
target_thread_alive (ptid_t ptid)
{
  return current_inferior ()->top_target ()->thread_alive (ptid);
}

/* See target.h.  */

void
target_update_thread_list (void)
{
  current_inferior ()->top_target ()->update_thread_list ();
}

void
target_stop (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();

  gdb_assert (!proc_target->commit_resumed_state);

  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  current_inferior ()->top_target ()->stop (ptid);
}

void
target_interrupt ()
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  current_inferior ()->top_target ()->interrupt ();
}
/* See target/target.h.  */

void
target_pass_ctrlc (void)
{
  /* Pass the Ctrl-C to the first target that has a thread
     running.  */
  for (inferior *inf : all_inferiors ())
    {
      target_ops *proc_target = inf->process_target ();
      if (proc_target == NULL)
        continue;

      for (thread_info *thr : inf->non_exited_threads ())
        {
          /* A thread can be THREAD_STOPPED and executing, while
             running an infcall.  */
          if (thr->state == THREAD_RUNNING || thr->executing ())
            {
              /* We can get here quite deep in target layers.  Avoid
                 switching thread context or anything that would
                 communicate with the target (e.g., to fetch
                 registers), or flushing e.g., the frame cache.  We
                 just switch inferior in order to be able to call
                 through the target_stack.  */
              scoped_restore_current_inferior restore_inferior;
              set_current_inferior (inf);
              current_inferior ()->top_target ()->pass_ctrlc ();
              return;
            }
        }
    }
}

/* See target/target.h.  */

void
default_target_pass_ctrlc (struct target_ops *ops)
{
  target_interrupt ();
}
/* See target/target.h.  */

void
target_stop_and_wait (ptid_t ptid)
{
  struct target_waitstatus status;
  bool was_non_stop = non_stop;

  non_stop = true;
  target_stop (ptid);

  target_wait (ptid, &status, 0);

  non_stop = was_non_stop;
}

/* See target/target.h.  */

void
target_continue_no_signal (ptid_t ptid)
{
  target_resume (ptid, 0, GDB_SIGNAL_0);
}

/* See target/target.h.  */

void
target_continue (ptid_t ptid, enum gdb_signal signal)
{
  target_resume (ptid, 0, signal);
}
/* Concatenate ELEM to LIST, a comma-separated list.  */

static void
str_comma_list_concat_elem (std::string *list, const char *elem)
{
  if (!list->empty ())
    list->append (", ");

  list->append (elem);
}

/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   OPT is removed from TARGET_OPTIONS.  */

static void
do_option (target_wait_flags *target_options, std::string *ret,
           target_wait_flag opt, const char *opt_str)
{
  if ((*target_options & opt) != 0)
    {
      str_comma_list_concat_elem (ret, opt_str);
      *target_options &= ~opt;
    }
}

/* See target.h.  */

std::string
target_options_to_string (target_wait_flags target_options)
{
  std::string ret;

#define DO_TARG_OPTION(OPT) \
  do_option (&target_options, &ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  if (target_options != 0)
    str_comma_list_concat_elem (&ret, "unknown???");

  return ret;
}
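/* For example, target_options_to_string (TARGET_WNOHANG) yields
   "TARGET_WNOHANG", while any bits without a DO_TARG_OPTION entry show
   up as "unknown???" so they are at least visible in debug output.  */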
void
target_fetch_registers (struct regcache *regcache, int regno)
{
  current_inferior ()->top_target ()->fetch_registers (regcache, regno);
  target_debug_printf ("%s", regcache->register_debug_string (regno).c_str ());
}

void
target_store_registers (struct regcache *regcache, int regno)
{
  if (!may_write_registers)
    error (_("Writing to registers is not allowed (regno %d)"), regno);

  current_inferior ()->top_target ()->store_registers (regcache, regno);
  target_debug_printf ("%s", regcache->register_debug_string (regno).c_str ());
}

int
target_core_of_thread (ptid_t ptid)
{
  return current_inferior ()->top_target ()->core_of_thread (ptid);
}
int
simple_verify_memory (struct target_ops *ops,
                      const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
{
  LONGEST total_xfered = 0;

  while (total_xfered < size)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;
      gdb_byte buf[1024];
      ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);

      status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                                    buf, NULL, lma + total_xfered, howmuch,
                                    &xfered_len);
      if (status == TARGET_XFER_OK
          && memcmp (data + total_xfered, buf, xfered_len) == 0)
        {
          total_xfered += xfered_len;
          QUIT;
        }
      else
        return 0;
    }
  return 1;
}

/* Default implementation of memory verification.  */

static int
default_verify_memory (struct target_ops *self,
                       const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  /* Start over from the top of the target stack.  */
  return simple_verify_memory (current_inferior ()->top_target (),
                               data, memaddr, size);
}

/* See target.h.  */

int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->verify_memory (data, memaddr, size);
}
/* The documentation for this function is in its prototype declaration in
   target.h.  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
                               enum target_hw_bp_type rw)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->insert_mask_watchpoint (addr, mask, rw);
}

/* The documentation for this function is in its prototype declaration in
   target.h.  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
                               enum target_hw_bp_type rw)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->remove_mask_watchpoint (addr, mask, rw);
}

/* The documentation for this function is in its prototype declaration
   in target.h.  */

int
target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->masked_watch_num_registers (addr, mask);
}

/* The documentation for this function is in its prototype declaration
   in target.h.  */

int
target_ranged_break_num_registers (void)
{
  return current_inferior ()->top_target ()->ranged_break_num_registers ();
}
/* See target.h.  */

struct btrace_target_info *
target_enable_btrace (thread_info *tp, const struct btrace_config *conf)
{
  return current_inferior ()->top_target ()->enable_btrace (tp, conf);
}

/* See target.h.  */

void
target_disable_btrace (struct btrace_target_info *btinfo)
{
  current_inferior ()->top_target ()->disable_btrace (btinfo);
}

/* See target.h.  */

void
target_teardown_btrace (struct btrace_target_info *btinfo)
{
  current_inferior ()->top_target ()->teardown_btrace (btinfo);
}

/* See target.h.  */

enum btrace_error
target_read_btrace (struct btrace_data *btrace,
                    struct btrace_target_info *btinfo,
                    enum btrace_read_type type)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->read_btrace (btrace, btinfo, type);
}

/* See target.h.  */

const struct btrace_config *
target_btrace_conf (const struct btrace_target_info *btinfo)
{
  return current_inferior ()->top_target ()->btrace_conf (btinfo);
}
/* See target.h.  */

void
target_stop_recording (void)
{
  current_inferior ()->top_target ()->stop_recording ();
}

/* See target.h.  */

void
target_save_record (const char *filename)
{
  current_inferior ()->top_target ()->save_record (filename);
}

/* See target.h.  */

bool
target_supports_delete_record ()
{
  return current_inferior ()->top_target ()->supports_delete_record ();
}

/* See target.h.  */

void
target_delete_record (void)
{
  current_inferior ()->top_target ()->delete_record ();
}

/* See target.h.  */

enum record_method
target_record_method (ptid_t ptid)
{
  return current_inferior ()->top_target ()->record_method (ptid);
}

/* See target.h.  */

int
target_record_is_replaying (ptid_t ptid)
{
  return current_inferior ()->top_target ()->record_is_replaying (ptid);
}

/* See target.h.  */

int
target_record_will_replay (ptid_t ptid, int dir)
{
  return current_inferior ()->top_target ()->record_will_replay (ptid, dir);
}

/* See target.h.  */

void
target_record_stop_replaying (void)
{
  current_inferior ()->top_target ()->record_stop_replaying ();
}

/* See target.h.  */

void
target_goto_record_begin (void)
{
  current_inferior ()->top_target ()->goto_record_begin ();
}

/* See target.h.  */

void
target_goto_record_end (void)
{
  current_inferior ()->top_target ()->goto_record_end ();
}

/* See target.h.  */

void
target_goto_record (ULONGEST insn)
{
  current_inferior ()->top_target ()->goto_record (insn);
}

/* See target.h.  */

void
target_insn_history (int size, gdb_disassembly_flags flags)
{
  current_inferior ()->top_target ()->insn_history (size, flags);
}

/* See target.h.  */

void
target_insn_history_from (ULONGEST from, int size,
                          gdb_disassembly_flags flags)
{
  current_inferior ()->top_target ()->insn_history_from (from, size, flags);
}

/* See target.h.  */

void
target_insn_history_range (ULONGEST begin, ULONGEST end,
                           gdb_disassembly_flags flags)
{
  current_inferior ()->top_target ()->insn_history_range (begin, end, flags);
}

/* See target.h.  */

void
target_call_history (int size, record_print_flags flags)
{
  current_inferior ()->top_target ()->call_history (size, flags);
}

/* See target.h.  */

void
target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
{
  current_inferior ()->top_target ()->call_history_from (begin, size, flags);
}

/* See target.h.  */

void
target_call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
{
  current_inferior ()->top_target ()->call_history_range (begin, end, flags);
}
/* See target.h.  */

const struct frame_unwind *
target_get_unwinder (void)
{
  return current_inferior ()->top_target ()->get_unwinder ();
}

/* See target.h.  */

const struct frame_unwind *
target_get_tailcall_unwinder (void)
{
  return current_inferior ()->top_target ()->get_tailcall_unwinder ();
}

/* See target.h.  */

void
target_prepare_to_generate_core (void)
{
  current_inferior ()->top_target ()->prepare_to_generate_core ();
}

/* See target.h.  */

void
target_done_generating_core (void)
{
  current_inferior ()->top_target ()->done_generating_core ();
}
static char targ_desc[] =
  "Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";

static void
default_rcmd (struct target_ops *self, const char *command,
              struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}

static void
do_monitor_command (const char *cmd, int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
/* Erases all the memory regions marked as flash.  CMD and FROM_TTY are
   ignored.  */

static void
flash_erase_command (const char *cmd, int from_tty)
{
  /* Used to communicate termination of flash operations to the target.  */
  bool found_flash_region = false;
  gdbarch *gdbarch = current_inferior ()->arch ();

  std::vector<mem_region> mem_regions = target_memory_map ();

  /* Iterate over all memory regions.  */
  for (const mem_region &m : mem_regions)
    {
      /* Is this a flash memory region?  */
      if (m.attrib.mode == MEM_FLASH)
        {
          found_flash_region = true;
          target_flash_erase (m.lo, m.hi - m.lo);

          ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");

          current_uiout->message (_("Erasing flash memory region at address "));
          current_uiout->field_core_addr ("address", gdbarch, m.lo);
          current_uiout->message (", size = ");
          current_uiout->field_string ("size", hex_string (m.hi - m.lo));
          current_uiout->message ("\n");
        }
    }

  /* Did we do any flash operations?  If so, we need to finalize them.  */
  if (found_flash_region)
    target_flash_done ();
  else
    current_uiout->message (_("No flash memory regions found.\n"));
}
/* Print the name of each layer of our target stack.  */

static void
maintenance_print_target_stack (const char *cmd, int from_tty)
{
  gdb_printf (_("The current target stack is:\n"));

  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    {
      if (t->stratum () == debug_stratum)
        continue;
      gdb_printf ("  - %s (%s)\n", t->shortname (), t->longname ());
    }
}
/* See target.h.  */

void
target_async (bool enable)
{
  /* If we are trying to enable async mode then it must be the case that
     async mode is possible for this target.  */
  gdb_assert (!enable || target_can_async_p ());
  infrun_async (enable);
  current_inferior ()->top_target ()->async (enable);
}

/* See target.h.  */

void
target_thread_events (bool enable)
{
  current_inferior ()->top_target ()->thread_events (enable);
}

/* See target.h.  */

bool
target_supports_set_thread_options (gdb_thread_options options)
{
  inferior *inf = current_inferior ();
  return inf->top_target ()->supports_set_thread_options (options);
}
/* Controls if targets can report that they can/are async.  This is
   just for maintainers to use when debugging gdb.  */
bool target_async_permitted = true;

static void
set_maint_target_async (bool permitted)
{
  if (have_live_inferiors ())
    error (_("Cannot change this setting while the inferior is running."));

  target_async_permitted = permitted;
}

static bool
get_maint_target_async ()
{
  return target_async_permitted;
}

static void
show_maint_target_async (ui_file *file, int from_tty,
                         cmd_list_element *c, const char *value)
{
  gdb_printf (file,
              _("Controlling the inferior in "
                "asynchronous mode is %s.\n"), value);
}
/* Return true if the target operates in non-stop mode even with "set
   non-stop off".  */

static int
target_always_non_stop_p (void)
{
  return current_inferior ()->top_target ()->always_non_stop_p ();
}

/* See target.h.  */

bool
target_is_non_stop_p ()
{
  return ((non_stop
           || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
           || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
               && target_always_non_stop_p ()))
          && target_can_async_p ());
}

/* See target.h.  */

bool
exists_non_stop_target ()
{
  if (target_is_non_stop_p ())
    return true;

  scoped_restore_current_thread restore_thread;

  for (inferior *inf : all_inferiors ())
    {
      switch_to_inferior_no_thread (inf);
      if (target_is_non_stop_p ())
        return true;
    }

  return false;
}

/* Controls if targets can report that they always run in non-stop
   mode.  This is just for maintainers to use when debugging gdb.  */
enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
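/* In other words, target_is_non_stop_p () above is true when async
   execution is possible and either the user asked for non-stop, "maint
   set target-non-stop" forces it on, or (in auto mode) the target
   itself insists on non-stop operation.  */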
/* Set callback for maint target-non-stop setting.  */

static void
set_maint_target_non_stop (auto_boolean enabled)
{
  if (have_live_inferiors ())
    error (_("Cannot change this setting while the inferior is running."));

  target_non_stop_enabled = enabled;
}

/* Get callback for maint target-non-stop setting.  */

static auto_boolean
get_maint_target_non_stop ()
{
  return target_non_stop_enabled;
}

static void
show_maint_target_non_stop (ui_file *file, int from_tty,
                            cmd_list_element *c, const char *value)
{
  if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
    gdb_printf (file,
                _("Whether the target is always in non-stop mode "
                  "is %s (currently %s).\n"), value,
                target_always_non_stop_p () ? "on" : "off");
  else
    gdb_printf (file,
                _("Whether the target is always in non-stop mode "
                  "is %s.\n"), value);
}
/* Temporary copies of permission settings.  */

static bool may_write_registers_1 = true;
static bool may_write_memory_1 = true;
static bool may_insert_breakpoints_1 = true;
static bool may_insert_tracepoints_1 = true;
static bool may_insert_fast_tracepoints_1 = true;
static bool may_stop_1 = true;

/* Make the user-set values match the real values again.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
/* The one function handles (most of) the permission flags in the same
   way.  */

static void
set_target_permissions (const char *args, int from_tty,
                        struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}

/* Set some permissions independently of observer mode.  */

static void
set_write_memory_registers_permission (const char *args, int from_tty,
                                       struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  may_write_registers = may_write_registers_1;
  update_observer_mode ();
}
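/* For instance, "set may-write-memory off" flips may_write_memory_1
   through the command machinery and then
   set_write_memory_registers_permission copies it into
   may_write_memory; the run-time checks (e.g. the may_write_registers
   test in target_store_registers above) consult the real flags, never
   the *_1 staging copies.  */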
void _initialize_target ();

void
_initialize_target ()
{
  the_debug_target = new debug_target ();

  add_info ("target", info_target_command, targ_desc);
  add_info ("files", info_target_command, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose."),
                             set_targetdebug,
                             show_targetdebug,
                             &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
                           &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
                           NULL,
                           show_trust_readonly,
                           &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
           _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
           _("Print the name of each layer of the internal target stack."),
           &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
                           _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
                           set_maint_target_async,
                           get_maint_target_async,
                           show_maint_target_async,
                           &maintenance_set_cmdlist,
                           &maintenance_show_cmdlist);

  add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
                                _("\
Set whether gdb always controls the inferior in non-stop mode."), _("\
Show whether gdb always controls the inferior in non-stop mode."), _("\
Tells gdb whether to control the inferior in non-stop mode."),
                                set_maint_target_non_stop,
                                get_maint_target_non_stop,
                                show_maint_target_non_stop,
                                &maintenance_set_cmdlist,
                                &maintenance_show_cmdlist);

  add_setshow_boolean_cmd ("may-write-registers", class_support,
                           &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
                           set_write_memory_registers_permission, NULL,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
                           &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
                           set_write_memory_registers_permission, NULL,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
                           &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
                           &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
                           &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
                           &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);

  add_com ("flash-erase", no_class, flash_erase_command,
           _("Erase all flash memory regions."));

  add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
                           &auto_connect_native_target, _("\
Set whether GDB may automatically connect to the native target."), _("\
Show whether GDB may automatically connect to the native target."), _("\
When on, and GDB is not connected to a target yet, GDB\n\
attempts \"run\" and other commands with the native target."),
                           NULL, show_auto_connect_native_target,
                           &setlist, &showlist);
}