/* Select target systems and architectures at runtime for GDB.

   Copyright (C) 1990-2024 Free Software Foundation, Inc.

   Contributed by Cygnus Support.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "extract-store-integer.h"
#include "target-dcache.h"
#include "cli/cli-cmds.h"
#include "observable.h"
#include "target-descriptions.h"
#include "gdbthread.h"
#include "inline-frame.h"
#include "tracepoint.h"
#include "gdbsupport/fileio.h"
#include "gdbsupport/agent.h"
#include "target-debug.h"
#include "event-top.h"
#include "gdbsupport/byte-vector.h"
#include "gdbsupport/search.h"
#include <unordered_map>
#include "target-connection.h"
#include "cli/cli-decode.h"
#include "cli/cli-style.h"
[[noreturn]] static void generic_tls_error (void);

static void default_rcmd (struct target_ops *, const char *, struct ui_file *);

static int default_verify_memory (struct target_ops *self,
				  CORE_ADDR memaddr, ULONGEST size);

[[noreturn]] static void tcomplain (void);
/* Mapping between target_info objects (which have address identity)
   and corresponding open/factory function/callback.  Each add_target
   call adds one entry to this map, and registers a "target
   TARGET_NAME" command that when invoked calls the factory registered
   here.  The target_info object is associated with the command via
   the command's context.  */
static std::unordered_map<const target_info *, target_open_ftype *>
  target_factories;
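/* Usage sketch (hypothetical target, not part of this file): a target
   backend ties a "target demo" command to its factory by declaring a
   target_info and calling add_target from its initializer.  All names
   below are made up for illustration.

     static const target_info demo_target_info = {
       "demo",                          // shortname -> "target demo"
       "Demo target",                   // longname
       "Connect to the demo target."    // doc
     };

     static void
     demo_target_open (const char *args, int from_tty)
     {
       // Parse ARGS, then push an instance of the demo target ...
     }

     void
     _initialize_demo ()
     {
       add_target (demo_target_info, demo_target_open);
     }
*/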
/* The singleton debug target.  */

static struct target_ops *the_debug_target;

/* Command list for target.  */

static struct cmd_list_element *targetlist = NULL;

bool trust_readonly = false;

/* Nonzero if we should show true memory content including
   memory breakpoints inserted by GDB.  */

static int show_memory_breakpoints = 0;
/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertent disruption, such as in non-stop mode.  */

bool may_write_registers = true;

bool may_write_memory = true;

bool may_insert_breakpoints = true;

bool may_insert_tracepoints = true;

bool may_insert_fast_tracepoints = true;

bool may_stop = true;

/* Non-zero if we want to see trace of target level stuff.  */

static unsigned int targetdebug = 0;
115 /* Print a "target" debug statement with the function name prefix. */
117 #define target_debug_printf(fmt, ...) \
118 debug_prefixed_printf_cond (targetdebug > 0, "target", fmt, ##__VA_ARGS__)
120 /* Print a "target" debug statement without the function name prefix. */
122 #define target_debug_printf_nofunc(fmt, ...) \
123 debug_prefixed_printf_cond_nofunc (targetdebug > 0, "target", fmt, ##__VA_ARGS__)
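/* Example (illustrative only): with "set debug target 1" in effect, a
   call such as

     target_debug_printf ("resuming %s", target_pid_to_str (ptid).c_str ());

   emits a "[target]"-prefixed line that also names the calling
   function, while target_debug_printf_nofunc omits the function
   name.  */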
static void
set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
{
  if (targetdebug)
    current_inferior ()->push_target (the_debug_target);
  else
    current_inferior ()->unpush_target (the_debug_target);
}

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Target debugging is %s.\n"), value);
}
bool
target_has_memory ()
{
  for (target_ops *t = current_inferior ()->top_target ();
       t != nullptr;
       t = t->beneath ())
    if (t->has_memory ())
      return true;

  return false;
}

bool
target_has_stack ()
{
  for (target_ops *t = current_inferior ()->top_target ();
       t != nullptr;
       t = t->beneath ())
    if (t->has_stack ())
      return true;

  return false;
}

bool
target_has_registers ()
{
  for (target_ops *t = current_inferior ()->top_target ();
       t != nullptr;
       t = t->beneath ())
    if (t->has_registers ())
      return true;

  return false;
}

bool
target_has_execution (inferior *inf)
{
  if (inf == nullptr)
    inf = current_inferior ();

  for (target_ops *t = inf->top_target ();
       t != nullptr;
       t = inf->find_target_beneath (t))
    if (t->has_execution (inf))
      return true;

  return false;
}

const char *
target_shortname ()
{
  return current_inferior ()->top_target ()->shortname ();
}
target_attach_no_wait ()
  return current_inferior ()->top_target ()->attach_no_wait ();

target_post_attach (int pid)
  return current_inferior ()->top_target ()->post_attach (pid);

target_prepare_to_store (regcache *regcache)
  return current_inferior ()->top_target ()->prepare_to_store (regcache);

target_supports_enable_disable_tracepoint ()
  target_ops *target = current_inferior ()->top_target ();
  return target->supports_enable_disable_tracepoint ();

target_supports_string_tracing ()
  return current_inferior ()->top_target ()->supports_string_tracing ();

target_supports_evaluation_of_breakpoint_conditions ()
  target_ops *target = current_inferior ()->top_target ();
  return target->supports_evaluation_of_breakpoint_conditions ();

target_supports_dumpcore ()
  return current_inferior ()->top_target ()->supports_dumpcore ();

target_dumpcore (const char *filename)
  return current_inferior ()->top_target ()->dumpcore (filename);

target_can_run_breakpoint_commands ()
  return current_inferior ()->top_target ()->can_run_breakpoint_commands ();

  return current_inferior ()->top_target ()->files_info ();
target_insert_fork_catchpoint (int pid)
  return current_inferior ()->top_target ()->insert_fork_catchpoint (pid);

target_remove_fork_catchpoint (int pid)
  return current_inferior ()->top_target ()->remove_fork_catchpoint (pid);

target_insert_vfork_catchpoint (int pid)
  return current_inferior ()->top_target ()->insert_vfork_catchpoint (pid);

target_remove_vfork_catchpoint (int pid)
  return current_inferior ()->top_target ()->remove_vfork_catchpoint (pid);

target_insert_exec_catchpoint (int pid)
  return current_inferior ()->top_target ()->insert_exec_catchpoint (pid);

target_remove_exec_catchpoint (int pid)
  return current_inferior ()->top_target ()->remove_exec_catchpoint (pid);

target_set_syscall_catchpoint (int pid, bool needed, int any_count,
			       gdb::array_view<const int> syscall_counts)
  target_ops *target = current_inferior ()->top_target ();
  return target->set_syscall_catchpoint (pid, needed, any_count,
					 syscall_counts);

target_rcmd (const char *command, struct ui_file *outbuf)
  return current_inferior ()->top_target ()->rcmd (command, outbuf);

target_can_lock_scheduler ()
  target_ops *target = current_inferior ()->top_target ();
  return (target->get_thread_control_capabilities () & tc_schedlock) != 0;
target_can_async_p ()
  return target_can_async_p (current_inferior ()->top_target ());

target_can_async_p (struct target_ops *target)
  if (!target_async_permitted)
    return false;
  return target->can_async_p ();

target_is_async_p ()
  bool result = current_inferior ()->top_target ()->is_async_p ();
  gdb_assert (target_async_permitted || !result);
  return result;

target_execution_direction ()
  return current_inferior ()->top_target ()->execution_direction ();

target_extra_thread_info (thread_info *tp)
  return current_inferior ()->top_target ()->extra_thread_info (tp);

target_pid_to_exec_file (int pid)
  return current_inferior ()->top_target ()->pid_to_exec_file (pid);

target_thread_architecture (ptid_t ptid)
  return current_inferior ()->top_target ()->thread_architecture (ptid);

target_find_memory_regions (find_memory_region_ftype func, void *data)
  return current_inferior ()->top_target ()->find_memory_regions (func, data);
gdb::unique_xmalloc_ptr<char>
target_make_corefile_notes (bfd *bfd, int *size_p)
  return current_inferior ()->top_target ()->make_corefile_notes (bfd, size_p);

target_get_bookmark (const char *args, int from_tty)
  return current_inferior ()->top_target ()->get_bookmark (args, from_tty);

target_goto_bookmark (const gdb_byte *arg, int from_tty)
  return current_inferior ()->top_target ()->goto_bookmark (arg, from_tty);

target_stopped_by_watchpoint ()
  return current_inferior ()->top_target ()->stopped_by_watchpoint ();

target_stopped_by_sw_breakpoint ()
  return current_inferior ()->top_target ()->stopped_by_sw_breakpoint ();

target_supports_stopped_by_sw_breakpoint ()
  target_ops *target = current_inferior ()->top_target ();
  return target->supports_stopped_by_sw_breakpoint ();

target_stopped_by_hw_breakpoint ()
  return current_inferior ()->top_target ()->stopped_by_hw_breakpoint ();

target_supports_stopped_by_hw_breakpoint ()
  target_ops *target = current_inferior ()->top_target ();
  return target->supports_stopped_by_hw_breakpoint ();

target_have_steppable_watchpoint ()
  return current_inferior ()->top_target ()->have_steppable_watchpoint ();
target_can_use_hardware_watchpoint (bptype type, int cnt, int othertype)
  target_ops *target = current_inferior ()->top_target ();
  return target->can_use_hw_breakpoint (type, cnt, othertype);

target_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
  target_ops *target = current_inferior ()->top_target ();
  return target->region_ok_for_hw_watchpoint (addr, len);

target_can_do_single_step ()
  return current_inferior ()->top_target ()->can_do_single_step ();

target_insert_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
			  struct expression *cond)
  target_ops *target = current_inferior ()->top_target ();
  return target->insert_watchpoint (addr, len, type, cond);

target_remove_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
			  struct expression *cond)
  target_ops *target = current_inferior ()->top_target ();
  return target->remove_watchpoint (addr, len, type, cond);

target_insert_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
  target_ops *target = current_inferior ()->top_target ();
  return target->insert_hw_breakpoint (gdbarch, bp_tgt);

target_remove_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
  target_ops *target = current_inferior ()->top_target ();
  return target->remove_hw_breakpoint (gdbarch, bp_tgt);
target_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int type,
				       struct expression *cond)
  target_ops *target = current_inferior ()->top_target ();
  return target->can_accel_watchpoint_condition (addr, len, type, cond);

target_can_execute_reverse ()
  return current_inferior ()->top_target ()->can_execute_reverse ();

target_get_ada_task_ptid (long lwp, ULONGEST tid)
  return current_inferior ()->top_target ()->get_ada_task_ptid (lwp, tid);

target_filesystem_is_local ()
  return current_inferior ()->top_target ()->filesystem_is_local ();
target_trace_init ()
  return current_inferior ()->top_target ()->trace_init ();

target_download_tracepoint (bp_location *location)
  return current_inferior ()->top_target ()->download_tracepoint (location);

target_can_download_tracepoint ()
  return current_inferior ()->top_target ()->can_download_tracepoint ();

target_download_trace_state_variable (const trace_state_variable &tsv)
  target_ops *target = current_inferior ()->top_target ();
  return target->download_trace_state_variable (tsv);

target_enable_tracepoint (bp_location *loc)
  return current_inferior ()->top_target ()->enable_tracepoint (loc);

target_disable_tracepoint (bp_location *loc)
  return current_inferior ()->top_target ()->disable_tracepoint (loc);

target_trace_start ()
  return current_inferior ()->top_target ()->trace_start ();

target_trace_set_readonly_regions ()
  return current_inferior ()->top_target ()->trace_set_readonly_regions ();

target_get_trace_status (trace_status *ts)
  return current_inferior ()->top_target ()->get_trace_status (ts);

target_get_tracepoint_status (tracepoint *tp, uploaded_tp *utp)
  return current_inferior ()->top_target ()->get_tracepoint_status (tp, utp);

target_trace_stop ()
  return current_inferior ()->top_target ()->trace_stop ();
target_trace_find (trace_find_type type, int num,
		   CORE_ADDR addr1, CORE_ADDR addr2, int *tpp)
  target_ops *target = current_inferior ()->top_target ();
  return target->trace_find (type, num, addr1, addr2, tpp);

target_get_trace_state_variable_value (int tsv, LONGEST *val)
  target_ops *target = current_inferior ()->top_target ();
  return target->get_trace_state_variable_value (tsv, val);

target_save_trace_data (const char *filename)
  return current_inferior ()->top_target ()->save_trace_data (filename);

target_upload_tracepoints (uploaded_tp **utpp)
  return current_inferior ()->top_target ()->upload_tracepoints (utpp);

target_upload_trace_state_variables (uploaded_tsv **utsvp)
  target_ops *target = current_inferior ()->top_target ();
  return target->upload_trace_state_variables (utsvp);

target_get_raw_trace_data (gdb_byte *buf, ULONGEST offset, LONGEST len)
  target_ops *target = current_inferior ()->top_target ();
  return target->get_raw_trace_data (buf, offset, len);

target_get_min_fast_tracepoint_insn_len ()
  target_ops *target = current_inferior ()->top_target ();
  return target->get_min_fast_tracepoint_insn_len ();
target_set_disconnected_tracing (int val)
  return current_inferior ()->top_target ()->set_disconnected_tracing (val);

target_set_circular_trace_buffer (int val)
  return current_inferior ()->top_target ()->set_circular_trace_buffer (val);

target_set_trace_buffer_size (LONGEST val)
  return current_inferior ()->top_target ()->set_trace_buffer_size (val);

target_set_trace_notes (const char *user, const char *notes,
			const char *stopnotes)
  target_ops *target = current_inferior ()->top_target ();
  return target->set_trace_notes (user, notes, stopnotes);

target_get_tib_address (ptid_t ptid, CORE_ADDR *addr)
  return current_inferior ()->top_target ()->get_tib_address (ptid, addr);

target_set_permissions ()
  return current_inferior ()->top_target ()->set_permissions ();

target_static_tracepoint_marker_at (CORE_ADDR addr,
				    static_tracepoint_marker *marker)
  target_ops *target = current_inferior ()->top_target ();
  return target->static_tracepoint_marker_at (addr, marker);
std::vector<static_tracepoint_marker>
target_static_tracepoint_markers_by_strid (const char *marker_id)
  target_ops *target = current_inferior ()->top_target ();
  return target->static_tracepoint_markers_by_strid (marker_id);

target_traceframe_info ()
  return current_inferior ()->top_target ()->traceframe_info ();

target_use_agent (bool use)
  return current_inferior ()->top_target ()->use_agent (use);

target_can_use_agent ()
  return current_inferior ()->top_target ()->can_use_agent ();

target_augmented_libraries_svr4_read ()
  return current_inferior ()->top_target ()->augmented_libraries_svr4_read ();

target_supports_memory_tagging ()
  return current_inferior ()->top_target ()->supports_memory_tagging ();

target_fetch_memtags (CORE_ADDR address, size_t len, gdb::byte_vector &tags,
		      int type)
  return current_inferior ()->top_target ()->fetch_memtags (address, len,
							     tags, type);

target_store_memtags (CORE_ADDR address, size_t len,
		      const gdb::byte_vector &tags, int type)
  return current_inferior ()->top_target ()->store_memtags (address, len,
							     tags, type);

target_is_address_tagged (gdbarch *gdbarch, CORE_ADDR address)
  return current_inferior ()->top_target ()->is_address_tagged (gdbarch,
								address);

target_fetch_x86_xsave_layout ()
  return current_inferior ()->top_target ()->fetch_x86_xsave_layout ();

target_log_command (const char *p)
  return current_inferior ()->top_target ()->log_command (p);
/* This is used to implement the various target commands.  */

static void
open_target (const char *args, int from_tty, struct cmd_list_element *command)
{
  auto *ti = static_cast<target_info *> (command->context ());
  target_open_ftype *func = target_factories[ti];

  target_debug_printf_nofunc ("-> %s->open (...)", ti->shortname);
  func (args, from_tty);
  target_debug_printf_nofunc ("<- %s->open (%s, %d)", ti->shortname, args,
			      from_tty);
}
void
add_target (const target_info &t, target_open_ftype *func,
	    completer_ftype *completer)
{
  struct cmd_list_element *c;

  auto &func_slot = target_factories[&t];
  if (func_slot != nullptr)
    internal_error (_("target already added (\"%s\")."), t.shortname);
  func_slot = func;

  if (targetlist == NULL)
    add_basic_prefix_cmd ("target", class_run, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol.  For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
			  &targetlist, 0, &cmdlist);
  c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
  c->set_context ((void *) &t);
  c->func = open_target;
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
void
add_deprecated_target_alias (const target_info &tinfo, const char *alias)
{
  struct cmd_list_element *c;

  /* If we use add_alias_cmd here, we do not get the deprecated warning,
     so we must use add_cmd directly.  */
  c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
  c->func = open_target;
  c->set_context ((void *) &tinfo);
  gdb::unique_xmalloc_ptr<char> alt
    = xstrprintf ("target %s", tinfo.shortname);
  deprecate_cmd (c, alt.release ());
}
void
target_kill (void)
{
  /* If the commit_resume_state of the to-be-killed-inferior's process stratum
     is true, and this inferior is the last live inferior with resumed threads
     of that target, then we want to leave commit_resume_state to false, as the
     target won't have any resumed threads anymore.  We achieve this with
     this scoped_disable_commit_resumed.  On construction, it will set the flag
     to false.  On destruction, it will only set it to true if there are
     resumed threads.  */
  scoped_disable_commit_resumed disable ("killing");
  current_inferior ()->top_target ()->kill ();
}

void
target_load (const char *arg, int from_tty)
{
  target_dcache_invalidate (current_program_space->aspace);
  current_inferior ()->top_target ()->load (arg, from_tty);
}
target_terminal_state target_terminal::m_terminal_state
  = target_terminal_state::is_ours;

/* See target/target.h.  */

void
target_terminal::init (void)
{
  current_inferior ()->top_target ()->terminal_init ();

  m_terminal_state = target_terminal_state::is_ours;
}
/* See target/target.h.  */

void
target_terminal::inferior (void)
{
  struct ui *ui = current_ui;

  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  */
  if (ui->prompt_state != PROMPT_BLOCKED)
    return;

  /* Since we always run the inferior in the main console (unless "set
     inferior-tty" is in effect), when some UI other than the main one
     calls target_terminal::inferior, then we leave the main UI's
     terminal settings as is.  */
  if (ui != main_ui)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */

  struct inferior *inf = current_inferior ();

  if (inf->terminal_state != target_terminal_state::is_inferior)
    {
      current_inferior ()->top_target ()->terminal_inferior ();
      inf->terminal_state = target_terminal_state::is_inferior;
    }

  m_terminal_state = target_terminal_state::is_inferior;

  /* If the user hit C-c before, pretend that it was hit right
     here.  */
  if (check_quit_flag ())
    target_pass_ctrlc ();
}
/* See target/target.h.  */

void
target_terminal::restore_inferior (void)
{
  struct ui *ui = current_ui;

  /* See target_terminal::inferior().  */
  if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
    return;

  /* Restore the terminal settings of inferiors that were in the
     foreground but are now ours_for_output due to a temporary
     target_terminal::ours_for_output() call.  */

  {
    scoped_restore_current_inferior restore_inferior;

    for (::inferior *inf : all_inferiors ())
      {
	if (inf->terminal_state == target_terminal_state::is_ours_for_output)
	  {
	    set_current_inferior (inf);
	    current_inferior ()->top_target ()->terminal_inferior ();
	    inf->terminal_state = target_terminal_state::is_inferior;
	  }
      }
  }

  m_terminal_state = target_terminal_state::is_inferior;

  /* If the user hit C-c before, pretend that it was hit right
     here.  */
  if (check_quit_flag ())
    target_pass_ctrlc ();
}
/* Switch terminal state to DESIRED_STATE, either is_ours, or
   is_ours_for_output.  */

static void
target_terminal_is_ours_kind (target_terminal_state desired_state)
{
  scoped_restore_current_inferior restore_inferior;

  /* Must do this in two passes.  First, have all inferiors save the
     current terminal settings.  Then, after all inferiors have had a
     chance to safely save the terminal settings, restore GDB's
     terminal settings.  */

  for (inferior *inf : all_inferiors ())
    {
      if (inf->terminal_state == target_terminal_state::is_inferior)
	{
	  set_current_inferior (inf);
	  current_inferior ()->top_target ()->terminal_save_inferior ();
	}
    }

  for (inferior *inf : all_inferiors ())
    {
      /* Note we don't check is_inferior here like above because we
	 need to handle 'is_ours_for_output -> is_ours' too.  Careful
	 to never transition from 'is_ours' to 'is_ours_for_output',
	 though.  */
      if (inf->terminal_state != target_terminal_state::is_ours
	  && inf->terminal_state != desired_state)
	{
	  set_current_inferior (inf);
	  if (desired_state == target_terminal_state::is_ours)
	    current_inferior ()->top_target ()->terminal_ours ();
	  else if (desired_state == target_terminal_state::is_ours_for_output)
	    current_inferior ()->top_target ()->terminal_ours_for_output ();
	  else
	    gdb_assert_not_reached ("unhandled desired state");

	  inf->terminal_state = desired_state;
	}
    }
}
/* See target/target.h.  */

void
target_terminal::ours ()
{
  struct ui *ui = current_ui;

  /* See target_terminal::inferior.  */
  if (ui != main_ui)
    return;

  if (m_terminal_state == target_terminal_state::is_ours)
    return;

  target_terminal_is_ours_kind (target_terminal_state::is_ours);
  m_terminal_state = target_terminal_state::is_ours;
}

/* See target/target.h.  */

void
target_terminal::ours_for_output ()
{
  struct ui *ui = current_ui;

  /* See target_terminal::inferior.  */
  if (ui != main_ui)
    return;

  if (!target_terminal::is_inferior ())
    return;

  target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
  target_terminal::m_terminal_state
    = target_terminal_state::is_ours_for_output;
}
/* See target/target.h.  */

void
target_terminal::info (const char *arg, int from_tty)
{
  current_inferior ()->top_target ()->terminal_info (arg, from_tty);
}
bool
target_supports_terminal_ours (void)
{
  /* The current top target is the target at the top of the target
     stack of the current inferior.  While normally there's always an
     inferior, we must check for nullptr here because we can get here
     very early during startup, before the initial inferior is first
     created.  */
  inferior *inf = current_inferior ();

  if (inf == nullptr)
    return false;
  return inf->top_target ()->supports_terminal_ours ();
}

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_inferior ()->top_target ()->shortname ());
}

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  gdb_printf (_("No saved terminal information.\n"));
}
/* A default implementation for the to_get_ada_task_ptid target method.

   This function builds the PTID by using both LWP and TID as part of
   the PTID lwp and tid elements.  The pid used is the pid of the
   inferior_ptid.  */

static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, ULONGEST tid)
{
  return ptid_t (inferior_ptid.pid (), lwp, tid);
}

static enum exec_direction_kind
default_execution_direction (struct target_ops *self)
{
  if (!target_can_execute_reverse ())
    return EXEC_FORWARD;
  else if (!target_can_async_p ())
    return EXEC_FORWARD;
  else
    gdb_assert_not_reached ("\
to_execution_direction must be implemented for reverse async");
}
void
target_ops_ref_policy::decref (target_ops *t)
{
  t->decref ();
  if (t->refcount () == 0)
    {
      if (t->stratum () == process_stratum)
	connection_list_remove (as_process_stratum_target (t));

      for (inferior *inf : all_inferiors ())
	gdb_assert (!inf->target_is_pushed (t));

      fileio_handles_invalidate_target (t);

      t->close ();

      target_debug_printf_nofunc ("closing target");
    }
}
void
target_stack::push (target_ops *t)
{
  /* We must create a new reference first.  It is possible that T is
     already pushed on this target stack, in which case we will first
     unpush it below, before re-pushing it.  If we don't increment the
     reference count now, then when we unpush it, we might end up deleting
     T, which is not good.  */
  auto ref = target_ops_ref::new_reference (t);

  strata stratum = t->stratum ();

  /* If there's already a target at this stratum, remove it.  */

  if (m_stack[stratum].get () != nullptr)
    unpush (m_stack[stratum].get ());

  /* Now add the new one.  */
  m_stack[stratum] = std::move (ref);

  if (m_top < stratum)
    m_top = stratum;

  if (stratum == process_stratum)
    connection_list_add (as_process_stratum_target (t));
}
bool
target_stack::unpush (target_ops *t)
{
  gdb_assert (t != NULL);

  strata stratum = t->stratum ();

  if (stratum == dummy_stratum)
    internal_error (_("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that a target can only occur
     once in the target stack.  */

  if (m_stack[stratum] != t)
    {
      /* If T wasn't pushed, quit.  Only open targets should be
	 closed.  */
      return false;
    }

  if (m_top == stratum)
    m_top = this->find_beneath (t)->stratum ();

  /* Move the target reference off the target stack, this sets the pointer
     held in m_stack to nullptr, and places the reference in ref.  When
     ref goes out of scope its reference count will be decremented, which
     might cause the target to close.

     We have to do it this way, and not just set the value in m_stack to
     nullptr directly, because doing so would decrement the reference
     count first, which might close the target, and closing the target
     does a check that the target is not on any inferior's target_stack.  */
  auto ref = std::move (m_stack[stratum]);

  return true;
}

void
target_unpusher::operator() (struct target_ops *ops) const
{
  current_inferior ()->unpush_target (ops);
}
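/* Usage sketch (illustrative; target_unpush_up is the unique_ptr
   typedef built on this deleter in target.h): a target's open routine
   can arrange for the just-pushed target to be unpushed again if an
   error is thrown before setup completes:

     target_unpush_up unpusher (this);   // unpush on error / early return
     ...                                  // work that may throw
     unpusher.release ();                 // success: keep the target pushed
*/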
/* Default implementation of to_get_thread_local_address.  */

[[noreturn]] static void
generic_tls_error (void)
{
  throw_error (TLS_GENERIC_ERROR,
	       _("Cannot find thread-local variables on this target"));
}

/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  */

CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  volatile CORE_ADDR addr = 0;
  struct target_ops *target = current_inferior ()->top_target ();
  gdbarch *gdbarch = current_inferior ()->arch ();

  /* If OBJFILE is a separate debug object file, look for the
     original object file.  */
  if (objfile->separate_debug_objfile_backlink != NULL)
    objfile = objfile->separate_debug_objfile_backlink;

  if (gdbarch_fetch_tls_load_module_address_p (gdbarch))
    {
      ptid_t ptid = inferior_ptid;

      try
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (gdbarch,
							   objfile);

	  if (gdbarch_get_thread_local_address_p (gdbarch))
	    addr = gdbarch_get_thread_local_address (gdbarch, ptid, lm_addr,
						     offset);
	  else
	    addr = target->get_thread_local_address (ptid, lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      catch (const gdb_exception &ex)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile),
		       target_pid_to_str (ptid).c_str ());
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile),
		       target_pid_to_str (ptid).c_str ());
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid).c_str (),
		       objfile_name (objfile), ex.what ());
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid).c_str (),
		       objfile_name (objfile), ex.what ());
	      break;
	    default:
	      throw;
	    }
	}
    }
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
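/* Worked example (illustrative): to locate a thread-local variable
   whose debug info places it at offset 0x10 within its module's TLS
   block, a caller does

     CORE_ADDR addr = target_translate_tls_address (objfile, 0x10);

   which resolves the load module for OBJFILE, asks the gdbarch hook
   (if the architecture provides one) or else the target for the
   variable's address in the current thread, and returns it; on
   failure one of the TLS errors above is reported.  */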
const char *
target_xfer_status_to_string (enum target_xfer_status status)
{
#define CASE(X) case X: return #X
  switch (status)
    {
      CASE(TARGET_XFER_E_IO);
      CASE(TARGET_XFER_UNAVAILABLE);
    default:
      return "<unknown>";
    }
#undef CASE
}

const std::vector<target_section> *
target_get_section_table (struct target_ops *target)
{
  return target->get_section_table ();
}
/* Find a section containing ADDR.  */

const struct target_section *
target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
{
  const std::vector<target_section> *table
    = target_get_section_table (target);

  if (table == NULL)
    return NULL;

  for (const target_section &secp : *table)
    {
      if (addr >= secp.addr && addr < secp.endaddr)
	return &secp;
    }
  return NULL;
}

const std::vector<target_section> *
default_get_section_table ()
{
  return &current_program_space->target_sections ();
}
/* Helper for the memory xfer routines.  Checks the attributes of the
   memory region of MEMADDR against the read or write being attempted.
   If the access is permitted returns true, otherwise returns false.
   REGION_P is an optional output parameter.  If not-NULL, it is
   filled with a pointer to the memory region of MEMADDR.  REG_LEN
   returns LEN trimmed to the end of the region.  This is how much the
   caller can continue requesting, if the access is permitted.  A
   single xfer request must not straddle memory region boundaries.  */

static bool
memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
			  ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
			  struct mem_region **region_p)
{
  struct mem_region *region;

  region = lookup_mem_region (memaddr);

  if (region_p != NULL)
    *region_p = region;

  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return false;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return false;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return false;
    }

  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    *reg_len = len;
  else
    *reg_len = region->hi - memaddr;

  return true;
}
/* Read memory from more than one valid target.  A core file, for
   instance, could have some of memory but delegate other bits to
   the target below it.  So, we must manually try all targets.  */

enum target_xfer_status
raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
			 const gdb_byte *writebuf, ULONGEST memaddr,
			 LONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;

  do
    {
      res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
			       readbuf, writebuf, memaddr, len,
			       xfered_len);
      if (res == TARGET_XFER_OK)
	break;

      /* Stop if the target reports that the memory is not available.  */
      if (res == TARGET_XFER_UNAVAILABLE)
	break;

      /* Don't continue past targets which have all the memory.
	 At one time, this code was necessary to read data from
	 executables / shared libraries when data for the requested
	 addresses weren't available in the core file.  But now the
	 core target handles this case itself.  */
      if (ops->has_all_memory ())
	break;

      ops = ops->beneath ();
    }
  while (ops != NULL);

  /* The cache works at the raw memory level.  Make sure the cache
     gets updated with raw contents no matter what kind of memory
     object was originally being written.  Note we do write-through
     first, so that if it fails, we don't write to the cache contents
     that never made it to the target.  */
  if (writebuf != NULL
      && inferior_ptid != null_ptid
      && target_dcache_init_p (current_program_space->aspace)
      && (stack_cache_enabled_p () || code_cache_enabled_p ()))
    {
      DCACHE *dcache = target_dcache_get (current_program_space->aspace);

      /* Note that writing to an area of memory which wasn't present
	 in the cache doesn't cause it to be loaded in.  */
      dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
    }

  return res;
}
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf,
		       ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  ULONGEST reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  const std::vector<target_section> *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);

	  auto match_cb = [=] (const struct target_section *s)
	    {
	      return (strcmp (section_name, s->the_bfd_section->name) == 0);
	    };

	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    *table, match_cb);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      const struct target_section *secp
	= target_section_by_addr (ops, memaddr);

      if (secp != NULL
	  && (bfd_section_flags (secp->the_bfd_section) & SEC_READONLY))
	{
	  const std::vector<target_section> *table
	    = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    *table);
	}
    }

  /* Try GDB's internal data cache.  */

  if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
				 &region))
    return TARGET_XFER_E_IO;

  if (inferior_ptid != null_ptid)
    inf = current_inferior ();
  else
    inf = NULL;

  if (inf != NULL
      && readbuf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache
	= target_dcache_get_or_init (current_program_space->aspace);

      return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
					 reg_len, xfered_len);
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
/* Perform a partial memory transfer.  For docs see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_partial (struct target_ops *ops, enum target_object object,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;

  /* Zero length requests are ok and require no work.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  memaddr = gdbarch_remove_non_address_bits (current_inferior ()->arch (),
					     memaddr);

  /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
     breakpoint insns, thus hiding out from higher layers whether
     there are software breakpoints inserted in the code stream.  */
  if (readbuf != NULL)
    {
      res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
				   xfered_len);

      if (res == TARGET_XFER_OK && !show_memory_breakpoints)
	breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
    }
  else
    {
      /* A large write request is likely to be partially satisfied
	 by memory_xfer_partial_1.  We will continually malloc
	 and free a copy of the entire write request for breakpoint
	 shadow handling even though we only end up writing a small
	 subset of it.  Cap writes to a limit specified by the target
	 to mitigate this.  */
      len = std::min (ops->get_memory_xfer_limit (), len);

      gdb::byte_vector buf (writebuf, writebuf + len);
      breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
      res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr,
				   len, xfered_len);
    }

  return res;
}

scoped_restore_tmpl<int>
make_scoped_restore_show_memory_breakpoints (int show)
{
  return make_scoped_restore (&show_memory_breakpoints, show);
}
/* For docs see target.h, to_xfer_partial.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Skip/avoid accessing the target if the memory region
	 attributes block the access.  Check this here instead of in
	 raw_memory_xfer_partial as otherwise we'd end up checking
	 this twice in the case of the memory_xfer_partial path is
	 taken; once before checking the dcache, and another in the
	 tail call to raw_memory_xfer_partial.  */
      if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
				     NULL))
	return TARGET_XFER_E_IO;

      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->xfer_partial (object, annex, readbuf,
				writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      std::string s
	= string_printf ("%s:target_xfer_partial "
			 "(%d, %s, %s, %s, %s, %s) = %d, %s",
			 ops->shortname (), (int) object,
			 (annex ? annex : "(null)"),
			 host_address_to_string (readbuf),
			 host_address_to_string (writebuf),
			 core_addr_to_string_nz (offset), pulongest (len),
			 retval, pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;

      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  string_appendf (s, ", bytes =");
	  for (i = 0; i < *xfered_len; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      string_appendf (s, " ...");
		      break;
		    }

		  target_debug_printf_nofunc ("%s", s.c_str ());
		  s.clear ();
		}

	      string_appendf (s, " %02x", myaddr[i] & 0xff);
	    }
	}

      target_debug_printf_nofunc ("%s", s.c_str ());
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
/* Read LEN bytes of target memory at address MEMADDR, placing the
   results in GDB's memory at MYADDR.  Returns either 0 for success or
   -1 if any error occurs.

   If an error occurs, no guarantee is made about the contents of the data at
   MYADDR.  In particular, the caller should not depend upon partial reads
   filling the buffer with good data.  There is no way for the caller to know
   how much good data might have been transferred anyway.  Callers that can
   deal with partial reads should call target_read (which will retry until
   it makes no progress, and then return how much was transferred).  */

int
target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  if (target_read (current_inferior ()->top_target (),
		   TARGET_OBJECT_MEMORY, NULL,
		   myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}
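/* Usage sketch (illustrative): read a fixed-size block and bail out on
   failure, since a partial read leaves the buffer contents undefined:

     gdb_byte buf[16];

     if (target_read_memory (addr, buf, sizeof buf) != 0)
       error (_("Cannot read memory at %s"), core_addr_to_string (addr));

   Callers that can make progress with partial data should use
   target_read instead.  */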
/* See target/target.h.  */

int
target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
{
  gdb_byte buf[4];
  int r;

  r = target_read_memory (memaddr, buf, sizeof buf);
  if (r != 0)
    return r;
  *result = extract_unsigned_integer
	      (buf, sizeof buf,
	       gdbarch_byte_order (current_inferior ()->arch ()));
  return 0;
}
/* Like target_read_memory, but specify explicitly that this is a read
   from the target's raw memory.  That is, this read bypasses the
   dcache, breakpoint shadowing, etc.  */

int
target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  if (target_read (current_inferior ()->top_target (),
		   TARGET_OBJECT_RAW_MEMORY, NULL,
		   myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}

/* Like target_read_memory, but specify explicitly that this is a read from
   the target's stack.  This may trigger different cache behavior.  */

int
target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  if (target_read (current_inferior ()->top_target (),
		   TARGET_OBJECT_STACK_MEMORY, NULL,
		   myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}

/* Like target_read_memory, but specify explicitly that this is a read from
   the target's code.  This may trigger different cache behavior.  */

int
target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  if (target_read (current_inferior ()->top_target (),
		   TARGET_OBJECT_CODE_MEMORY, NULL,
		   myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}
/* Write LEN bytes from MYADDR to target memory at address MEMADDR.
   Returns either 0 for success or -1 if any error occurs.  If an
   error occurs, no guarantee is made about how much data got written.
   Callers that can deal with partial writes should call
   target_write.  */

int
target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
{
  if (target_write (current_inferior ()->top_target (),
		    TARGET_OBJECT_MEMORY, NULL,
		    myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}

/* Write LEN bytes from MYADDR to target raw memory at address
   MEMADDR.  Returns either 0 for success or -1 if any error occurs.
   If an error occurs, no guarantee is made about how much data got
   written.  Callers that can deal with partial writes should call
   target_write.  */

int
target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr,
			 ssize_t len)
{
  if (target_write (current_inferior ()->top_target (),
		    TARGET_OBJECT_RAW_MEMORY, NULL,
		    myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}
/* Fetch the target's memory map.  */

std::vector<mem_region>
target_memory_map (void)
{
  target_ops *target = current_inferior ()->top_target ();
  std::vector<mem_region> result = target->memory_map ();
  if (result.empty ())
    return result;

  std::sort (result.begin (), result.end ());

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  mem_region *last_one = NULL;
  for (size_t ix = 0; ix < result.size (); ix++)
    {
      mem_region *this_one = &result[ix];
      this_one->number = ix;

      if (last_one != NULL && last_one->hi > this_one->lo)
	{
	  warning (_("Overlapping regions in memory map: ignoring"));
	  return std::vector<mem_region> ();
	}

      last_one = this_one;
    }

  return result;
}

void
target_flash_erase (ULONGEST address, LONGEST length)
{
  current_inferior ()->top_target ()->flash_erase (address, length);
}

void
target_flash_done (void)
{
  current_inferior ()->top_target ()->flash_done ();
}
static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Mode for reading from readonly sections is %s.\n"),
	      value);
}
/* Target vector read/write partial wrapper functions.  */

static enum target_xfer_status
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
			      xfered_len);
}

static enum target_xfer_status
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
			      xfered_len);
}
/* Wrappers to perform the full transfer.  */

/* For docs on target_read see target.h.  */

LONGEST
target_read (struct target_ops *ops,
	     enum target_object object,
	     const char *annex, gdb_byte *buf,
	     ULONGEST offset, LONGEST len)
{
  LONGEST xfered_total = 0;
  int unit_size = 1;

  /* If we are reading from a memory object, find the length of an addressable
     unit for that architecture.  */
  if (object == TARGET_OBJECT_MEMORY
      || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY
      || object == TARGET_OBJECT_RAW_MEMORY)
    unit_size = gdbarch_addressable_memory_unit_size
		  (current_inferior ()->arch ());

  while (xfered_total < len)
    {
      ULONGEST xfered_partial;
      enum target_xfer_status status;

      status = target_read_partial (ops, object, annex,
				    buf + xfered_total * unit_size,
				    offset + xfered_total, len - xfered_total,
				    &xfered_partial);

      /* Call an observer, notifying them of the xfer progress?  */
      if (status == TARGET_XFER_EOF)
	return xfered_total;
      else if (status == TARGET_XFER_OK)
	{
	  xfered_total += xfered_partial;
	  QUIT;
	}
      else
	return TARGET_XFER_E_IO;
    }

  return len;
}
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning, or end, nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  Explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */
static void
read_whatever_is_readable (struct target_ops *ops,
			   const ULONGEST begin, const ULONGEST end,
			   int unit_size,
			   std::vector<memory_read_result> *result)
{
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    return;

  gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf.get (), begin, 1,
			   &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf.get () + (end - begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    return;

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin) / 2;

      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf.get () + (first_half_begin - begin) * unit_size,
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the
	     next iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  */
      result->emplace_back (begin, current_end, std::move (buf));
    }
  else
    {
      /* The [current_end, end) range has been read.  */
      LONGEST region_len = end - current_end;

      gdb::unique_xmalloc_ptr<gdb_byte> data
	((gdb_byte *) xmalloc (region_len * unit_size));
      memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
	      region_len * unit_size);
      result->emplace_back (current_end, end, std::move (data));
    }
}
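/* Worked example (illustrative): suppose [0x1000, 0x2000) cannot be
   read as a whole, but [0x1800, 0x2000) happens to be readable.  The
   probe of byte 0x1000 fails and the probe of byte 0x1fff succeeds, so
   the search above repeatedly halves the range from the readable end
   until it converges on 0x1800, and a single block covering
   [0x1800, 0x2000) is appended to RESULT.  */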
std::vector<memory_read_result>
read_memory_robust (struct target_ops *ops,
		    const ULONGEST offset, const LONGEST len)
{
  std::vector<memory_read_result> result;
  int unit_size
    = gdbarch_addressable_memory_unit_size (current_inferior ()->arch ());

  LONGEST xfered_total = 0;
  while (xfered_total < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered_total);
      LONGEST region_len;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      if (region->hi == 0)
	region_len = len - xfered_total;
      else
	region_len = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered_total += region_len;
	}
      else
	{
	  LONGEST to_read = std::min (len - xfered_total, region_len);
	  gdb::unique_xmalloc_ptr<gdb_byte> buffer
	    ((gdb_byte *) xmalloc (to_read * unit_size));

	  LONGEST xfered_partial =
	    target_read (ops, TARGET_OBJECT_MEMORY, NULL, buffer.get (),
			 offset + xfered_total, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfered_partial <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  */
	      read_whatever_is_readable (ops, offset + xfered_total,
					 offset + xfered_total + to_read,
					 unit_size, &result);
	      xfered_total += to_read;
	    }
	  else
	    {
	      result.emplace_back (offset + xfered_total,
				   offset + xfered_total + xfered_partial,
				   std::move (buffer));
	      xfered_total += xfered_partial;
	    }
	}
    }

  return result;
}
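/* Usage sketch (illustrative; the begin/end/data field names are
   assumed from memory_read_result in target.h): callers iterate over
   whichever blocks were actually readable:

     std::vector<memory_read_result> blocks
       = read_memory_robust (current_inferior ()->top_target (), addr, len);

     for (const memory_read_result &r : blocks)
       process_bytes (r.begin, r.end, r.data.get ());  // hypothetical helper
*/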
/* An alternative to target_write with progress callbacks.  */

LONGEST
target_write_with_progress (struct target_ops *ops,
			    enum target_object object,
			    const char *annex, const gdb_byte *buf,
			    ULONGEST offset, LONGEST len,
			    void (*progress) (ULONGEST, void *), void *baton)
{
  LONGEST xfered_total = 0;
  int unit_size = 1;

  /* If we are writing to a memory object, find the length of an addressable
     unit for that architecture.  */
  if (object == TARGET_OBJECT_MEMORY
      || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY
      || object == TARGET_OBJECT_RAW_MEMORY)
    unit_size = gdbarch_addressable_memory_unit_size
		  (current_inferior ()->arch ());

  /* Give the progress callback a chance to set up.  */
  if (progress)
    (*progress) (0, baton);

  while (xfered_total < len)
    {
      ULONGEST xfered_partial;
      enum target_xfer_status status;

      status = target_write_partial (ops, object, annex,
				     buf + xfered_total * unit_size,
				     offset + xfered_total,
				     len - xfered_total,
				     &xfered_partial);

      if (status != TARGET_XFER_OK)
	return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;

      if (progress)
	(*progress) (xfered_partial, baton);

      xfered_total += xfered_partial;
      QUIT;
    }
  return len;
}

/* For docs on target_write see target.h.  */

LONGEST
target_write (struct target_ops *ops,
	      enum target_object object,
	      const char *annex, const gdb_byte *buf,
	      ULONGEST offset, LONGEST len)
{
  return target_write_with_progress (ops, object, annex, buf, offset, len,
				     NULL, NULL);
}
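/* Usage sketch (illustrative): the progress callback receives the size
   of each chunk accepted by the target, which is how "load"-style
   consumers can drive a progress meter.  The callback and baton below
   are made up for illustration:

     static void
     note_progress (ULONGEST written, void *baton)
     {
       ULONGEST *total = (ULONGEST *) baton;
       *total += written;
     }

     ULONGEST total = 0;
     target_write_with_progress (ops, TARGET_OBJECT_MEMORY, NULL,
				 buf, lma, size, note_progress, &total);
*/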
/* Help for target_read_alloc and target_read_stralloc.  See their comments
   for details.  */

template <typename T>
std::optional<gdb::def_vector<T>>
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex)
{
  gdb::def_vector<T> buf;
  size_t buf_pos = 0;
  const int chunk = 4096;

  /* This function does not have a length parameter; it reads the
     entire OBJECT.  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      buf.resize (buf_pos + chunk);

      status = target_read_partial (ops, object, annex,
				    (gdb_byte *) &buf[buf_pos],
				    buf_pos, chunk,
				    &xfered_len);

      if (status == TARGET_XFER_EOF)
	{
	  /* Read all there was.  */
	  buf.resize (buf_pos);
	  return buf;
	}
      else if (status != TARGET_XFER_OK)
	{
	  /* An error occurred.  */
	  return {};
	}

      buf_pos += xfered_len;
    }
}
std::optional<gdb::byte_vector>
target_read_alloc (struct target_ops *ops, enum target_object object,
		   const char *annex)
{
  return target_read_alloc_1<gdb_byte> (ops, object, annex);
}

std::optional<gdb::char_vector>
target_read_stralloc (struct target_ops *ops, enum target_object object,
		      const char *annex)
{
  std::optional<gdb::char_vector> buf
    = target_read_alloc_1<char> (ops, object, annex);

  if (!buf)
    return {};

  if (buf->empty () || buf->back () != '\0')
    buf->push_back ('\0');

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (auto it = std::find (buf->begin (), buf->end (), '\0');
       it != buf->end (); it++)
    if (*it != '\0')
      {
	warning (_("target object %d, annex %s, "
		   "contained unexpected null characters"),
		 (int) object, annex ? annex : "(none)");
	break;
      }

  return buf;
}
/* Memory transfer methods.  */

static void
get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
		   LONGEST len)
{
  /* This method is used to read from an alternate, non-current
     target.  This read must bypass the overlay support (as symbols
     don't match this target), and GDB's internal cache (wrong cache
     for this target).  */
  if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
      != len)
    memory_error (TARGET_XFER_E_IO, addr);
}

ULONGEST
get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
			    int len, enum bfd_endian byte_order)
{
  gdb_byte buf[sizeof (ULONGEST)];

  gdb_assert (len <= sizeof (buf));
  get_target_memory (ops, addr, buf, len);
  return extract_unsigned_integer (buf, len, byte_order);
}
int
target_insert_breakpoint (struct gdbarch *gdbarch,
			  struct bp_target_info *bp_tgt)
{
  if (!may_insert_breakpoints)
    {
      warning (_("May not insert breakpoints"));
      return 1;
    }

  target_ops *target = current_inferior ()->top_target ();

  return target->insert_breakpoint (gdbarch, bp_tgt);
}

int
target_remove_breakpoint (struct gdbarch *gdbarch,
			  struct bp_target_info *bp_tgt,
			  enum remove_bp_reason reason)
{
  /* This is kind of a weird case to handle, but the permission might
     have been changed after breakpoints were inserted - in which case
     we should just take the user literally and assume that any
     breakpoints should be left in place.  */
  if (!may_insert_breakpoints)
    {
      warning (_("May not remove breakpoints"));
      return 1;
    }

  target_ops *target = current_inferior ()->top_target ();

  return target->remove_breakpoint (gdbarch, bp_tgt, reason);
}

static void
info_target_command (const char *args, int from_tty)
{
  int has_all_mem = 0;

  if (current_program_space->symfile_object_file != NULL)
    {
      objfile *objf = current_program_space->symfile_object_file;
      gdb_printf (_("Symbols from \"%ps\".\n"),
		  styled_string (file_name_style.style (),
				 objfile_name (objf)));
    }

  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    {
      if (!t->has_memory ())
	continue;

      if ((int) (t->stratum ()) <= (int) dummy_stratum)
	continue;
      if (has_all_mem)
	gdb_printf (_("\tWhile running this, "
		      "GDB does not access memory from...\n"));
      gdb_printf ("%s:\n", t->longname ());
      t->files_info ();
      has_all_mem = t->has_all_memory ();
    }
}
/* This function is called before any new inferior is created, e.g.
   by running a program, attaching, or connecting to a target.
   It cleans up any state from previous invocations which might
   change between runs.  This is a subset of what target_preopen
   resets (things which might change between targets).  */

void
target_pre_inferior ()
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:
     ...
     Cannot access memory at address 0xdeadbeef  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  */
  if (!gdbarch_has_global_solist (current_inferior ()->arch ()))
    {
      no_shared_libraries (current_program_space);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* attach_flag may be set if the previous process associated with
     the inferior was attached to.  */
  current_inferior ()->attach_flag = false;

  current_inferior ()->highest_thread_num = 0;

  update_previous_thread ();

  agent_capability_invalidate ();
}

/* This is to be called by the open routine before it does
   anything.  */

void
target_preopen (int from_tty)
{
  dont_repeat ();

  if (current_inferior ()->pid != 0)
    {
      if (!from_tty
	  || !target_has_execution ()
	  || query (_("A program is being debugged already.  Kill it? ")))
	{
	  /* Core inferiors actually should be detached, not
	     killed.  */
	  if (target_has_execution ())
	    target_kill ();
	  else
	    target_detach (current_inferior (), 0);
	}
      else
	error (_("Program not killed."));
    }

  /* Release reference to old previous thread.  */
  update_previous_thread ();

  /* Calling target_kill may remove the target from the stack.  But if
     it doesn't (which seems like a win for UDI), remove it now.  */
  /* Leave the exec target, though.  The user may be switching from a
     live process to a core of the same program.  */
  current_inferior ()->pop_all_targets_above (file_stratum);

  target_pre_inferior ();
}
void
target_detach (inferior *inf, int from_tty)
{
  /* Threads don't need to be resumed until the end of this function.  */
  scoped_disable_commit_resumed disable_commit_resumed ("detaching");

  /* After we have detached, we will clear the register cache for this inferior
     by calling registers_changed_ptid.  We must save the pid_ptid before
     detaching, as the target detach method will clear inf->pid.  */
  ptid_t save_pid_ptid = ptid_t (inf->pid);

  /* As long as some to_detach implementations rely on the current_inferior
     (either directly, or indirectly, like through reading memory), INF needs
     to be the current inferior.  Once that requirement no longer holds,
     we can remove this assertion.  */
  gdb_assert (inf == current_inferior ());

  prepare_for_detach ();

  gdb::observers::inferior_pre_detach.notify (inf);

  /* Hold a strong reference because detaching may unpush the
     target.  */
  auto proc_target_ref = target_ops_ref::new_reference (inf->process_target ());

  current_inferior ()->top_target ()->detach (inf, from_tty);

  process_stratum_target *proc_target
    = as_process_stratum_target (proc_target_ref.get ());

  registers_changed_ptid (proc_target, save_pid_ptid);

  /* We have to ensure we have no frame cache left.  Normally,
     registers_changed_ptid (save_pid_ptid) calls reinit_frame_cache when
     inferior_ptid matches save_pid_ptid, but in our case, it does not
     call it, as inferior_ptid has been reset.  */
  reinit_frame_cache ();

  disable_commit_resumed.reset_and_commit ();
}
void
target_disconnect (const char *args, int from_tty)
{
  /* If we're in breakpoints-always-inserted mode or if breakpoints
     are global across processes, we have to remove them before
     disconnecting.  */
  remove_breakpoints ();

  current_inferior ()->top_target ()->disconnect (args, from_tty);
}

/* See target/target.h.  */

ptid_t
target_wait (ptid_t ptid, struct target_waitstatus *status,
	     target_wait_flags options)
{
  target_ops *target = current_inferior ()->top_target ();
  process_stratum_target *proc_target = current_inferior ()->process_target ();

  gdb_assert (!proc_target->commit_resumed_state);

  if (!target_can_async_p (target))
    gdb_assert ((options & TARGET_WNOHANG) == 0);

  try
    {
      gdb::observers::target_pre_wait.notify (ptid);
      ptid_t event_ptid = target->wait (ptid, status, options);
      gdb::observers::target_post_wait.notify (event_ptid);
      return event_ptid;
    }
  catch (...)
    {
      gdb::observers::target_post_wait.notify (null_ptid);
      throw;
    }
}

ptid_t
default_target_wait (struct target_ops *ops,
		     ptid_t ptid, struct target_waitstatus *status,
		     target_wait_flags options)
{
  status->set_ignore ();
  return minus_one_ptid;
}

std::string
target_pid_to_str (ptid_t ptid)
{
  return current_inferior ()->top_target ()->pid_to_str (ptid);
}

const char *
target_thread_name (struct thread_info *info)
{
  gdb_assert (info->inf == current_inferior ());

  return current_inferior ()->top_target ()->thread_name (info);
}

struct thread_info *
target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
				     int handle_len,
				     struct inferior *inf)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->thread_handle_to_thread_info (thread_handle, handle_len, inf);
}

gdb::array_view<const gdb_byte>
target_thread_info_to_thread_handle (struct thread_info *tip)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->thread_info_to_thread_handle (tip);
}
void
target_resume (ptid_t scope_ptid, int step, enum gdb_signal signal)
{
  process_stratum_target *curr_target = current_inferior ()->process_target ();
  gdb_assert (!curr_target->commit_resumed_state);

  gdb_assert (inferior_ptid != null_ptid);
  gdb_assert (inferior_ptid.matches (scope_ptid));

  target_dcache_invalidate (current_program_space->aspace);

  current_inferior ()->top_target ()->resume (scope_ptid, step, signal);

  registers_changed_ptid (curr_target, scope_ptid);
  /* We only set the internal executing state here.  The user/frontend
     running state is set at a higher level.  This also clears the
     thread's stop_pc as side effect.  */
  set_executing (curr_target, scope_ptid, true);
  clear_inline_frame_state (curr_target, scope_ptid);

  if (target_can_async_p ())
    target_async (true);
}

void
target_commit_resumed ()
{
  gdb_assert (current_inferior ()->process_target ()->commit_resumed_state);
  current_inferior ()->top_target ()->commit_resumed ();
}

bool
target_has_pending_events ()
{
  return current_inferior ()->top_target ()->has_pending_events ();
}

void
target_pass_signals (gdb::array_view<const unsigned char> pass_signals)
{
  current_inferior ()->top_target ()->pass_signals (pass_signals);
}

void
target_program_signals (gdb::array_view<const unsigned char> program_signals)
{
  current_inferior ()->top_target ()->program_signals (program_signals);
}

static void
default_follow_fork (struct target_ops *self, inferior *child_inf,
		     ptid_t child_ptid, target_waitkind fork_kind,
		     bool follow_child, bool detach_fork)
{
  /* Some target returned a fork event, but did not know how to follow it.  */
  internal_error (_("could not find a target to follow fork"));
}

static void
default_follow_clone (struct target_ops *self, ptid_t child_ptid)
{
  /* Some target returned a clone event, but did not know how to follow it.  */
  internal_error (_("could not find a target to follow clone"));
}

void
target_follow_fork (inferior *child_inf, ptid_t child_ptid,
		    target_waitkind fork_kind, bool follow_child,
		    bool detach_fork)
{
  target_ops *target = current_inferior ()->top_target ();

  /* Check consistency between CHILD_INF, CHILD_PTID, FOLLOW_CHILD and
     DETACH_FORK.  */
  if (child_inf != nullptr)
    {
      gdb_assert (follow_child || !detach_fork);
      gdb_assert (child_inf->pid == child_ptid.pid ());
    }
  else
    gdb_assert (!follow_child && detach_fork);

  return target->follow_fork (child_inf, child_ptid, fork_kind, follow_child,
			      detach_fork);
}

void
target_follow_exec (inferior *follow_inf, ptid_t ptid,
		    const char *execd_pathname)
{
  current_inferior ()->top_target ()->follow_exec (follow_inf, ptid,
						   execd_pathname);
}

static void
default_mourn_inferior (struct target_ops *self)
{
  internal_error (_("could not find a target to follow mourn inferior"));
}

void
target_mourn_inferior (ptid_t ptid)
{
  gdb_assert (ptid.pid () == inferior_ptid.pid ());
  current_inferior ()->top_target ()->mourn_inferior ();
}

/* Look for a target which can describe architectural features, starting
   from TARGET.  If we find one, return its description.  */

const struct target_desc *
target_read_description (struct target_ops *target)
{
  return target->read_description ();
}
/* Default implementation of memory-searching.  */

static int
default_search_memory (struct target_ops *self,
		       CORE_ADDR start_addr, ULONGEST search_space_len,
		       const gdb_byte *pattern, ULONGEST pattern_len,
		       CORE_ADDR *found_addrp)
{
  auto read_memory = [=] (CORE_ADDR addr, gdb_byte *result, size_t len)
    {
      return target_read (current_inferior ()->top_target (),
			  TARGET_OBJECT_MEMORY, NULL,
			  result, addr, len) == len;
    };

  /* Start over from the top of the target stack.  */
  return simple_search_memory (read_memory, start_addr, search_space_len,
			       pattern, pattern_len, found_addrp);
}

/* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
   sequence of bytes in PATTERN with length PATTERN_LEN.

   The result is 1 if found, 0 if not found, and -1 if there was an error
   requiring halting of the search (e.g. memory read error).
   If the pattern is found the address is recorded in FOUND_ADDRP.  */

int
target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->search_memory (start_addr, search_space_len, pattern,
				pattern_len, found_addrp);
}
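/* Usage sketch (illustration only): scanning 1 MiB of target memory
   for a 4-byte pattern.

     static const gdb_byte pattern[] = { 0xde, 0xad, 0xbe, 0xef };
     CORE_ADDR found;
     int res = target_search_memory (start, 1024 * 1024,
				     pattern, sizeof (pattern), &found);

   Per the comment above, RES is 1 with FOUND set on a match, 0 when
   the pattern is absent, and -1 on a read error.  */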
/* Look through the currently pushed targets.  If none of them will
   be able to restart the currently running process, issue an error
   message.  */

void
target_require_runnable (void)
{
  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    {
      /* If this target knows how to create a new program, then
	 assume we will still be able to after killing the current
	 one.  Either killing and mourning will not pop T, or else
	 find_default_run_target will find it again.  */
      if (t->can_create_inferior ())
	return;

      /* Do not worry about targets at certain strata that can not
	 create inferiors.  Assume they will be pushed again if
	 necessary, and continue to the process_stratum.  */
      if (t->stratum () > process_stratum)
	continue;

      error (_("The \"%s\" target does not support \"run\".  "
	       "Try \"help target\" or \"continue\"."),
	     t->shortname ());
    }

  /* This function is only called if the target is running.  In that
     case there should have been a process_stratum target and it
     should either know how to create inferiors, or not...  */
  internal_error (_("No targets found"));
}

/* Whether GDB is allowed to fall back to the default run target for
   "run", "attach", etc. when no target is connected yet.  */
static bool auto_connect_native_target = true;

static void
show_auto_connect_native_target (struct ui_file *file, int from_tty,
				 struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Whether GDB may automatically connect to the "
		"native target is %s.\n"),
	      value);
}

/* A pointer to the target that can respond to "run" or "attach".
   Native targets are always singletons and instantiated early at GDB
   startup.  */

static target_ops *the_native_target;

void
set_native_target (target_ops *target)
{
  if (the_native_target != NULL)
    internal_error (_("native target already set (\"%s\")."),
		    the_native_target->longname ());

  the_native_target = target;
}

target_ops *
get_native_target ()
{
  return the_native_target;
}

/* Look through the list of possible targets for a target that can
   execute a run or attach command without any other data.  This is
   used to locate the default process stratum.

   If DO_MESG is not NULL, the result is always valid (error() is
   called for errors); else, return NULL on error.  */

static struct target_ops *
find_default_run_target (const char *do_mesg)
{
  if (auto_connect_native_target && the_native_target != NULL)
    return the_native_target;

  if (do_mesg != NULL)
    error (_("Don't know how to %s.  Try \"help target\"."), do_mesg);
  return NULL;
}

struct target_ops *
find_attach_target (void)
{
  /* If a target on the current stack can attach, use it.  */
  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    if (t->can_attach ())
      return t;

  /* Otherwise, use the default run target for attaching.  */
  return find_default_run_target ("attach");
}

struct target_ops *
find_run_target (void)
{
  /* If a target on the current stack can run, use it.  */
  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    if (t->can_create_inferior ())
      return t;

  /* Otherwise, use the default run target.  */
  return find_default_run_target ("run");
}

bool
target_ops::info_proc (const char *args, enum info_proc_what what)
{
  return false;
}

/* Implement the "info proc" command.  */

bool
target_info_proc (const char *args, enum info_proc_what what)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  t = find_target_at (process_stratum);
  if (t == NULL)
    t = find_default_run_target (NULL);

  for (; t != NULL; t = t->beneath ())
    {
      if (t->info_proc (args, what))
	{
	  target_debug_printf_nofunc ("target_info_proc (\"%s\", %d)",
				      args, what);
	  return true;
	}
    }

  return false;
}

static int
find_default_supports_disable_randomization (struct target_ops *self)
{
  struct target_ops *t;

  t = find_default_run_target (NULL);
  if (t != NULL)
    return t->supports_disable_randomization ();
  return 0;
}

int
target_supports_disable_randomization (void)
{
  return current_inferior ()->top_target ()->supports_disable_randomization ();
}

/* See target/target.h.  */

int
target_supports_multi_process (void)
{
  return current_inferior ()->top_target ()->supports_multi_process ();
}

std::optional<gdb::char_vector>
target_get_osdata (const char *type)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  t = find_target_at (process_stratum);
  if (t == NULL)
    t = find_default_run_target ("get OS data");

  if (t == NULL)
    return {};

  return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
}
target_ops *
target_ops::beneath () const
{
  return current_inferior ()->find_target_beneath (this);
}

void
target_ops::close ()
{
}

bool
target_ops::can_attach ()
{
  return false;
}

void
target_ops::attach (const char *, int)
{
  gdb_assert_not_reached ("target_ops::attach called");
}

bool
target_ops::can_create_inferior ()
{
  return false;
}

void
target_ops::create_inferior (const char *, const std::string &,
			     char **, int)
{
  gdb_assert_not_reached ("target_ops::create_inferior called");
}

bool
target_ops::can_run ()
{
  return false;
}

int
target_can_run ()
{
  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    {
      if (t->can_run ())
	return 1;
    }

  return 0;
}

/* Target file operations.  */

static struct target_ops *
default_fileio_target (void)
{
  struct target_ops *t;

  /* If we're already connected to something that can perform
     file I/O, use it.  Otherwise, try using the native target.  */
  t = find_target_at (process_stratum);
  if (t != NULL)
    return t;
  return find_default_run_target ("file I/O");
}

/* File handle for target file operations.  */

struct fileio_fh_t
{
  /* The target on which this file is open.  NULL if the target is
     meanwhile closed while the handle is open.  */
  target_ops *target;

  /* The file descriptor on the target.  */
  int target_fd;

  /* Check whether this fileio_fh_t represents a closed file.  */
  bool is_closed ()
  {
    return target_fd < 0;
  }
};

/* Vector of currently open file handles.  The value returned by
   target_fileio_open and passed as the FD argument to other
   target_fileio_* functions is an index into this vector.  This
   vector's entries are never freed; instead, files are marked as
   closed, and the handle becomes available for reuse.  */
static std::vector<fileio_fh_t> fileio_fhandles;

/* Index into fileio_fhandles of the lowest handle that might be
   closed.  This permits handle reuse without searching the whole
   list each time a new file is opened.  */
static int lowest_closed_fd;

void
fileio_handles_invalidate_target (target_ops *targ)
{
  for (fileio_fh_t &fh : fileio_fhandles)
    if (fh.target == targ)
      fh.target = NULL;
}

/* Acquire a target fileio file descriptor.  */

static int
acquire_fileio_fd (target_ops *target, int target_fd)
{
  /* Search for closed handles to reuse.  */
  for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
    {
      fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];

      if (fh.is_closed ())
	break;
    }

  /* Push a new handle if no closed handles were found.  */
  if (lowest_closed_fd == fileio_fhandles.size ())
    fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
  else
    fileio_fhandles[lowest_closed_fd] = {target, target_fd};

  /* Should no longer be marked closed.  */
  gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());

  /* Return its index, and start the next lookup at
     the next index.  */
  return lowest_closed_fd++;
}

/* Release a target fileio file descriptor.  */

static void
release_fileio_fd (int fd, fileio_fh_t *fh)
{
  fh->target_fd = -1;
  lowest_closed_fd = std::min (lowest_closed_fd, fd);
}

/* Return a pointer to the fileio_fhandle_t corresponding to FD.  */

static fileio_fh_t *
fileio_fd_to_fh (int fd)
{
  return &fileio_fhandles[fd];
}
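/* Illustration only: how the handle table evolves.  Opening two files
   yields GDB-side descriptors 0 and 1; closing descriptor 0 marks the
   slot closed (target_fd == -1) and rewinds lowest_closed_fd, so the
   next open reuses index 0 instead of growing the vector:

     int a = acquire_fileio_fd (t, 7);    // a == 0
     int b = acquire_fileio_fd (t, 9);    // b == 1
     release_fileio_fd (a, fileio_fd_to_fh (a));
     int c = acquire_fileio_fd (t, 11);   // c == 0 again  */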
/* Default implementations of file i/o methods.  We don't want these
   to delegate automatically, because we need to know which target
   supported the method, in order to call it directly from within
   pread/pwrite, etc.  */

int
target_ops::fileio_open (struct inferior *inf, const char *filename,
			 int flags, int mode, int warn_if_slow,
			 fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
			   ULONGEST offset, fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
			  ULONGEST offset, fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_fstat (int fd, struct stat *sb, fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_stat (struct inferior *inf, const char *filename,
			 struct stat *sb, fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_close (int fd, fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_unlink (struct inferior *inf, const char *filename,
			   fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

std::optional<std::string>
target_ops::fileio_readlink (struct inferior *inf, const char *filename,
			     fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return {};
}

int
target_fileio_open (struct inferior *inf, const char *filename,
		    int flags, int mode, bool warn_if_slow,
		    fileio_error *target_errno)
{
  for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
    {
      int fd = t->fileio_open (inf, filename, flags, mode,
			       warn_if_slow, target_errno);

      if (fd == -1 && *target_errno == FILEIO_ENOSYS)
	continue;

      if (fd < 0)
	fd = -1;
      else
	fd = acquire_fileio_fd (t, fd);

      target_debug_printf_nofunc ("target_fileio_open (%d,%s,0x%x,0%o,%d) = %d (%d)",
				  inf == NULL ? 0 : inf->num, filename,
				  flags, mode, warn_if_slow, fd,
				  fd != -1 ? 0 : *target_errno);
      return fd;
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
int
target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
		      ULONGEST offset, fileio_error *target_errno)
{
  fileio_fh_t *fh = fileio_fd_to_fh (fd);
  int ret = -1;

  if (fh->is_closed ())
    *target_errno = FILEIO_EBADF;
  else if (fh->target == NULL)
    *target_errno = FILEIO_EIO;
  else
    ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
				     len, offset, target_errno);

  target_debug_printf_nofunc ("target_fileio_pwrite (%d,...,%d,%s) = %d (%d)",
			      fd, len, pulongest (offset), ret,
			      ret != -1 ? 0 : *target_errno);
  return ret;
}

int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
		     ULONGEST offset, fileio_error *target_errno)
{
  fileio_fh_t *fh = fileio_fd_to_fh (fd);
  int ret = -1;

  if (fh->is_closed ())
    *target_errno = FILEIO_EBADF;
  else if (fh->target == NULL)
    *target_errno = FILEIO_EIO;
  else
    ret = fh->target->fileio_pread (fh->target_fd, read_buf,
				    len, offset, target_errno);

  target_debug_printf_nofunc ("target_fileio_pread (%d,...,%d,%s) = %d (%d)",
			      fd, len, pulongest (offset), ret,
			      ret != -1 ? 0 : *target_errno);
  return ret;
}

int
target_fileio_fstat (int fd, struct stat *sb, fileio_error *target_errno)
{
  fileio_fh_t *fh = fileio_fd_to_fh (fd);
  int ret = -1;

  if (fh->is_closed ())
    *target_errno = FILEIO_EBADF;
  else if (fh->target == NULL)
    *target_errno = FILEIO_EIO;
  else
    ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);

  target_debug_printf_nofunc ("target_fileio_fstat (%d) = %d (%d)", fd, ret,
			      ret != -1 ? 0 : *target_errno);
  return ret;
}

int
target_fileio_stat (struct inferior *inf, const char *filename,
		    struct stat *sb, fileio_error *target_errno)
{
  for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
    {
      int ret = t->fileio_stat (inf, filename, sb, target_errno);

      if (ret == -1 && *target_errno == FILEIO_ENOSYS)
	continue;

      target_debug_printf_nofunc ("target_fileio_stat (%s) = %d (%d)",
				  filename, ret,
				  ret != -1 ? 0 : *target_errno);
      return ret;
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_fileio_close (int fd, fileio_error *target_errno)
{
  fileio_fh_t *fh = fileio_fd_to_fh (fd);
  int ret = -1;

  if (fh->is_closed ())
    *target_errno = FILEIO_EBADF;
  else
    {
      if (fh->target != NULL)
	ret = fh->target->fileio_close (fh->target_fd,
					target_errno);
      else
	ret = 0;
      release_fileio_fd (fd, fh);
    }

  target_debug_printf_nofunc ("target_fileio_close (%d) = %d (%d)", fd, ret,
			      ret != -1 ? 0 : *target_errno);
  return ret;
}

int
target_fileio_unlink (struct inferior *inf, const char *filename,
		      fileio_error *target_errno)
{
  for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
    {
      int ret = t->fileio_unlink (inf, filename, target_errno);

      if (ret == -1 && *target_errno == FILEIO_ENOSYS)
	continue;

      target_debug_printf_nofunc ("target_fileio_unlink (%d,%s) = %d (%d)",
				  inf == NULL ? 0 : inf->num, filename, ret,
				  ret != -1 ? 0 : *target_errno);
      return ret;
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

std::optional<std::string>
target_fileio_readlink (struct inferior *inf, const char *filename,
			fileio_error *target_errno)
{
  for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
    {
      std::optional<std::string> ret
	= t->fileio_readlink (inf, filename, target_errno);

      if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
	continue;

      target_debug_printf_nofunc ("target_fileio_readlink (%d,%s) = %s (%d)",
				  inf == NULL ? 0 : inf->num, filename,
				  ret ? ret->c_str () : "(nil)",
				  ret ? 0 : *target_errno);
      return ret;
    }

  *target_errno = FILEIO_ENOSYS;
  return {};
}
/* Like scoped_fd, but specific to target fileio.  */

class scoped_target_fd
{
public:
  explicit scoped_target_fd (int fd) noexcept
    : m_fd (fd)
  {
  }

  ~scoped_target_fd ()
  {
    if (m_fd >= 0)
      {
	fileio_error target_errno;

	target_fileio_close (m_fd, &target_errno);
      }
  }

  DISABLE_COPY_AND_ASSIGN (scoped_target_fd);

  int get () const noexcept
  {
    return m_fd;
  }

private:
  int m_fd;
};
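/* Usage sketch (illustration only; the path is made up): the RAII
   wrapper guarantees the target-side descriptor is closed on every
   exit path.

     fileio_error err;
     scoped_target_fd fd (target_fileio_open (inf, "/tmp/example.log",
					      FILEIO_O_RDONLY, 0, false,
					      &err));
     if (fd.get () == -1)
       return;                  // nothing to close
     ... target_fileio_pread (fd.get (), ...) ...
                                // destructor closes the handle here,
                                // even if an exception unwinds.  */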
/* Read target file FILENAME, in the filesystem as seen by INF.  If
   INF is NULL, use the filesystem seen by the debugger (GDB or, for
   remote targets, the remote stub).  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes
   are available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for
   more information.  */

static LONGEST
target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
			    gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  fileio_error target_errno;

  scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
					   0700, false, &target_errno));
  if (fd.get () == -1)
    return -1;

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = (gdb_byte *) xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      n = target_fileio_pread (fd.get (), &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = (gdb_byte *) xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}

LONGEST
target_fileio_read_alloc (struct inferior *inf, const char *filename,
			  gdb_byte **buf_p)
{
  return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
}

gdb::unique_xmalloc_ptr<char>
target_fileio_read_stralloc (struct inferior *inf, const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return gdb::unique_xmalloc_ptr<char> (nullptr);

  if (transferred == 0)
    return make_unique_xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  return gdb::unique_xmalloc_ptr<char> (bufstr);
}
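/* Usage sketch (illustration only; the path and PARSE are
   placeholders): slurping a text file from the inferior's filesystem
   view.

     gdb::unique_xmalloc_ptr<char> text
       = target_fileio_read_stralloc (inf, "/proc/version");
     if (text != nullptr)
       parse (text.get ());

   A NULL result means the read failed; an empty string means the
   file was empty.  */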
static int
default_region_ok_for_hw_watchpoint (struct target_ops *self,
				     CORE_ADDR addr, int len)
{
  gdbarch *arch = current_inferior ()->arch ();
  return (len <= gdbarch_ptr_bit (arch) / TARGET_CHAR_BIT);
}

static int
default_watchpoint_addr_within_range (struct target_ops *target,
				      CORE_ADDR addr,
				      CORE_ADDR start, int length)
{
  return addr >= start && addr < start + length;
}
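/* Worked example (illustration only): with 64-bit pointers,
   gdbarch_ptr_bit / TARGET_CHAR_BIT is 64 / 8 = 8, so this default
   accepts hardware watchpoints of up to 8 bytes and rejects longer
   regions; architectures with stricter or looser rules override the
   hook.  */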
target_ops *
target_stack::find_beneath (const target_ops *t) const
{
  /* Look for a non-empty slot at stratum levels beneath T's.  */
  for (int stratum = t->stratum () - 1; stratum >= 0; --stratum)
    if (m_stack[stratum].get () != NULL)
      return m_stack[stratum].get ();

  return NULL;
}

struct target_ops *
find_target_at (enum strata stratum)
{
  return current_inferior ()->target_at (stratum);
}

void
target_announce_detach (int from_tty)
{
  int pid;
  const char *exec_file;

  if (!from_tty)
    return;

  pid = inferior_ptid.pid ();
  exec_file = current_program_space->exec_filename ();
  if (exec_file == nullptr)
    gdb_printf ("Detaching from pid %s\n",
		target_pid_to_str (ptid_t (pid)).c_str ());
  else
    gdb_printf (_("Detaching from program: %ps, %s\n"),
		styled_string (file_name_style.style (), exec_file),
		target_pid_to_str (ptid_t (pid)).c_str ());
}

void
target_announce_attach (int from_tty, int pid)
{
  if (!from_tty)
    return;

  const char *exec_file = current_program_space->exec_filename ();

  if (exec_file != nullptr)
    gdb_printf ("Attaching to program: %ps, %s\n",
		styled_string (file_name_style.style (), exec_file),
		target_pid_to_str (ptid_t (pid)).c_str ());
  else
    gdb_printf ("Attaching to %s\n",
		target_pid_to_str (ptid_t (pid)).c_str ());
}

/* The inferior process has died.  Long live the inferior!  */

void
generic_mourn_inferior (void)
{
  inferior *inf = current_inferior ();

  switch_to_no_thread ();

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out (inf->pspace);

  if (inf->pid != 0)
    exit_inferior (inf);

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf, inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}

/* Convert a normal process ID to a string.  Returns the result as a
   std::string.  */

std::string
normal_pid_to_str (ptid_t ptid)
{
  return string_printf ("process %d", ptid.pid ());
}

static std::string
default_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}

/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
}

/* Error-catcher for target_make_corefile_notes.  */
static gdb::unique_xmalloc_ptr<char>
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
}
#include "target-delegates.c"

/* The initial current target, so that there is always a semi-valid
   current target.  */

static dummy_target the_dummy_target;

target_ops *
get_dummy_target ()
{
  return &the_dummy_target;
}

static const target_info dummy_target_info = {
  "None",
  N_("None"),
  ""
};

strata
dummy_target::stratum () const
{
  return dummy_stratum;
}

strata
debug_target::stratum () const
{
  return debug_stratum;
}

const target_info &
dummy_target::info () const
{
  return dummy_target_info;
}

const target_info &
debug_target::info () const
{
  return beneath ()->info ();
}

bool
target_thread_alive (ptid_t ptid)
{
  return current_inferior ()->top_target ()->thread_alive (ptid);
}

void
target_update_thread_list (void)
{
  current_inferior ()->top_target ()->update_thread_list ();
}

void
target_stop (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();

  gdb_assert (!proc_target->commit_resumed_state);

  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  current_inferior ()->top_target ()->stop (ptid);
}

void
target_interrupt ()
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  current_inferior ()->top_target ()->interrupt ();
}

void
target_pass_ctrlc (void)
{
  /* Pass the Ctrl-C to the first target that has a thread
     running.  */
  for (inferior *inf : all_inferiors ())
    {
      target_ops *proc_target = inf->process_target ();
      if (proc_target == NULL)
	continue;

      for (thread_info *thr : inf->non_exited_threads ())
	{
	  /* A thread can be THREAD_STOPPED and executing, while
	     running an infcall.  */
	  if (thr->state == THREAD_RUNNING || thr->executing ())
	    {
	      /* We can get here quite deep in target layers.  Avoid
		 switching thread context or anything that would
		 communicate with the target (e.g., to fetch
		 registers), or flushing e.g., the frame cache.  We
		 just switch inferior in order to be able to call
		 through the target_stack.  */
	      scoped_restore_current_inferior restore_inferior;
	      set_current_inferior (inf);
	      current_inferior ()->top_target ()->pass_ctrlc ();
	      return;
	    }
	}
    }
}

void
default_target_pass_ctrlc (struct target_ops *ops)
{
  target_interrupt ();
}

/* See target/target.h.  */

void
target_stop_and_wait (ptid_t ptid)
{
  struct target_waitstatus status;
  bool was_non_stop = non_stop;

  non_stop = true;
  target_stop (ptid);

  target_wait (ptid, &status, 0);

  non_stop = was_non_stop;
}

/* See target/target.h.  */

void
target_continue_no_signal (ptid_t ptid)
{
  target_resume (ptid, 0, GDB_SIGNAL_0);
}

/* See target/target.h.  */

void
target_continue (ptid_t ptid, enum gdb_signal signal)
{
  target_resume (ptid, 0, signal);
}
/* Concatenate ELEM to LIST, a comma-separated list.  */

static void
str_comma_list_concat_elem (std::string *list, const char *elem)
{
  if (!list->empty ())
    list->append (", ");

  list->append (elem);
}

/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   OPT is removed from TARGET_OPTIONS.  */

static void
do_option (target_wait_flags *target_options, std::string *ret,
	   target_wait_flag opt, const char *opt_str)
{
  if ((*target_options & opt) != 0)
    {
      str_comma_list_concat_elem (ret, opt_str);
      *target_options &= ~opt;
    }
}

std::string
target_options_to_string (target_wait_flags target_options)
{
  std::string ret;

#define DO_TARG_OPTION(OPT) \
  do_option (&target_options, &ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  if (target_options != 0)
    str_comma_list_concat_elem (&ret, "unknown???");

  return ret;
}
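/* Illustration only: with the single flag currently handled above,

     target_options_to_string (TARGET_WNOHANG)  => "TARGET_WNOHANG"
     target_options_to_string (0)               => ""

   and any bit not consumed by a DO_TARG_OPTION line is reported as
   "unknown???".  */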
void
target_fetch_registers (struct regcache *regcache, int regno)
{
  current_inferior ()->top_target ()->fetch_registers (regcache, regno);
  target_debug_printf ("%s", regcache->register_debug_string (regno).c_str ());
}

void
target_store_registers (struct regcache *regcache, int regno)
{
  if (!may_write_registers)
    error (_("Writing to registers is not allowed (regno %d)"), regno);

  current_inferior ()->top_target ()->store_registers (regcache, regno);
  target_debug_printf ("%s", regcache->register_debug_string (regno).c_str ());
}

int
target_core_of_thread (ptid_t ptid)
{
  return current_inferior ()->top_target ()->core_of_thread (ptid);
}

int
simple_verify_memory (struct target_ops *ops,
		      const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
{
  LONGEST total_xfered = 0;

  while (total_xfered < size)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;
      gdb_byte buf[1024];
      ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);

      status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				    buf, NULL, lma + total_xfered, howmuch,
				    &xfered_len);
      if (status == TARGET_XFER_OK
	  && memcmp (data + total_xfered, buf, xfered_len) == 0)
	{
	  total_xfered += xfered_len;
	  QUIT;
	}
      else
	return 0;
    }

  return 1;
}

/* Default implementation of memory verification.  */

static int
default_verify_memory (struct target_ops *self,
		       const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  /* Start over from the top of the target stack.  */
  return simple_verify_memory (current_inferior ()->top_target (),
			       data, memaddr, size);
}

int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->verify_memory (data, memaddr, size);
}
/* The documentation for this function is in its prototype declaration in
   target.h.  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
			       enum target_hw_bp_type rw)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->insert_mask_watchpoint (addr, mask, rw);
}

/* The documentation for this function is in its prototype declaration in
   target.h.  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
			       enum target_hw_bp_type rw)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->remove_mask_watchpoint (addr, mask, rw);
}

/* The documentation for this function is in its prototype declaration
   in target.h.  */

int
target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->masked_watch_num_registers (addr, mask);
}

/* The documentation for this function is in its prototype declaration
   in target.h.  */

int
target_ranged_break_num_registers (void)
{
  return current_inferior ()->top_target ()->ranged_break_num_registers ();
}

struct btrace_target_info *
target_enable_btrace (thread_info *tp, const struct btrace_config *conf)
{
  return current_inferior ()->top_target ()->enable_btrace (tp, conf);
}

void
target_disable_btrace (struct btrace_target_info *btinfo)
{
  current_inferior ()->top_target ()->disable_btrace (btinfo);
}

void
target_teardown_btrace (struct btrace_target_info *btinfo)
{
  current_inferior ()->top_target ()->teardown_btrace (btinfo);
}

enum btrace_error
target_read_btrace (struct btrace_data *btrace,
		    struct btrace_target_info *btinfo,
		    enum btrace_read_type type)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->read_btrace (btrace, btinfo, type);
}

const struct btrace_config *
target_btrace_conf (const struct btrace_target_info *btinfo)
{
  return current_inferior ()->top_target ()->btrace_conf (btinfo);
}
void
target_stop_recording (void)
{
  current_inferior ()->top_target ()->stop_recording ();
}

void
target_save_record (const char *filename)
{
  current_inferior ()->top_target ()->save_record (filename);
}

bool
target_supports_delete_record ()
{
  return current_inferior ()->top_target ()->supports_delete_record ();
}

void
target_delete_record (void)
{
  current_inferior ()->top_target ()->delete_record ();
}

enum record_method
target_record_method (ptid_t ptid)
{
  return current_inferior ()->top_target ()->record_method (ptid);
}

int
target_record_is_replaying (ptid_t ptid)
{
  return current_inferior ()->top_target ()->record_is_replaying (ptid);
}

int
target_record_will_replay (ptid_t ptid, int dir)
{
  return current_inferior ()->top_target ()->record_will_replay (ptid, dir);
}

void
target_record_stop_replaying (void)
{
  current_inferior ()->top_target ()->record_stop_replaying ();
}

void
target_goto_record_begin (void)
{
  current_inferior ()->top_target ()->goto_record_begin ();
}

void
target_goto_record_end (void)
{
  current_inferior ()->top_target ()->goto_record_end ();
}

void
target_goto_record (ULONGEST insn)
{
  current_inferior ()->top_target ()->goto_record (insn);
}

void
target_insn_history (int size, gdb_disassembly_flags flags)
{
  current_inferior ()->top_target ()->insn_history (size, flags);
}

void
target_insn_history_from (ULONGEST from, int size,
			  gdb_disassembly_flags flags)
{
  current_inferior ()->top_target ()->insn_history_from (from, size, flags);
}

void
target_insn_history_range (ULONGEST begin, ULONGEST end,
			   gdb_disassembly_flags flags)
{
  current_inferior ()->top_target ()->insn_history_range (begin, end, flags);
}

void
target_call_history (int size, record_print_flags flags)
{
  current_inferior ()->top_target ()->call_history (size, flags);
}

void
target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
{
  current_inferior ()->top_target ()->call_history_from (begin, size, flags);
}

void
target_call_history_range (ULONGEST begin, ULONGEST end,
			   record_print_flags flags)
{
  current_inferior ()->top_target ()->call_history_range (begin, end, flags);
}

const struct frame_unwind *
target_get_unwinder (void)
{
  return current_inferior ()->top_target ()->get_unwinder ();
}

const struct frame_unwind *
target_get_tailcall_unwinder (void)
{
  return current_inferior ()->top_target ()->get_tailcall_unwinder ();
}

void
target_prepare_to_generate_core (void)
{
  current_inferior ()->top_target ()->prepare_to_generate_core ();
}

void
target_done_generating_core (void)
{
  current_inferior ()->top_target ()->done_generating_core ();
}

static char targ_desc[] =
  "Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
static void
default_rcmd (struct target_ops *self, const char *command,
	      struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}

static void
do_monitor_command (const char *cmd, int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}

/* Erases all the memory regions marked as flash.  CMD and FROM_TTY are
   ignored.  */

static void
flash_erase_command (const char *cmd, int from_tty)
{
  /* Used to communicate termination of flash operations to the target.  */
  bool found_flash_region = false;
  gdbarch *gdbarch = current_inferior ()->arch ();

  std::vector<mem_region> mem_regions = target_memory_map ();

  /* Iterate over all memory regions.  */
  for (const mem_region &m : mem_regions)
    {
      /* Is this a flash memory region?  */
      if (m.attrib.mode == MEM_FLASH)
	{
	  found_flash_region = true;
	  target_flash_erase (m.lo, m.hi - m.lo);

	  ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");

	  current_uiout->message (_("Erasing flash memory region at address "));
	  current_uiout->field_core_addr ("address", gdbarch, m.lo);
	  current_uiout->message (", size = ");
	  current_uiout->field_string ("size", hex_string (m.hi - m.lo));
	  current_uiout->message ("\n");
	}
    }

  /* Did we do any flash operations?  If so, we need to finalize them.  */
  if (found_flash_region)
    target_flash_done ();
  else
    current_uiout->message (_("No flash memory regions found.\n"));
}
/* Print the name of each layer of our target stack.  */

static void
maintenance_print_target_stack (const char *cmd, int from_tty)
{
  gdb_printf (_("The current target stack is:\n"));

  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    {
      if (t->stratum () == debug_stratum)
	continue;
      gdb_printf ("  - %s (%s)\n", t->shortname (), t->longname ());
    }
}
void
target_async (bool enable)
{
  /* If we are trying to enable async mode then it must be the case that
     async mode is possible for this target.  */
  gdb_assert (!enable || target_can_async_p ());
  infrun_async (enable);
  current_inferior ()->top_target ()->async (enable);
}

void
target_thread_events (bool enable)
{
  current_inferior ()->top_target ()->thread_events (enable);
}

bool
target_supports_set_thread_options (gdb_thread_options options)
{
  inferior *inf = current_inferior ();
  return inf->top_target ()->supports_set_thread_options (options);
}

/* Controls if targets can report that they can/are async.  This is
   just for maintainers to use when debugging gdb.  */
bool target_async_permitted = true;

static void
set_maint_target_async (bool permitted)
{
  if (have_live_inferiors ())
    error (_("Cannot change this setting while the inferior is running."));

  target_async_permitted = permitted;
}

static bool
get_maint_target_async ()
{
  return target_async_permitted;
}

static void
show_maint_target_async (ui_file *file, int from_tty,
			 cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Controlling the inferior in "
		"asynchronous mode is %s.\n"), value);
}

/* Return true if the target operates in non-stop mode even with "set
   non-stop off".  */

static bool
target_always_non_stop_p (void)
{
  return current_inferior ()->top_target ()->always_non_stop_p ();
}

bool
target_is_non_stop_p ()
{
  return ((non_stop
	   || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
	   || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
	       && target_always_non_stop_p ()))
	  && target_can_async_p ());
}

bool
exists_non_stop_target ()
{
  if (target_is_non_stop_p ())
    return true;

  scoped_restore_current_thread restore_thread;

  for (inferior *inf : all_inferiors ())
    {
      switch_to_inferior_no_thread (inf);
      if (target_is_non_stop_p ())
	return true;
    }

  return false;
}

/* Controls if targets can report that they always run in non-stop
   mode.  This is just for maintainers to use when debugging gdb.  */
enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
/* Set callback for maint target-non-stop setting.  */

static void
set_maint_target_non_stop (auto_boolean enabled)
{
  if (have_live_inferiors ())
    error (_("Cannot change this setting while the inferior is running."));

  target_non_stop_enabled = enabled;
}

/* Get callback for maint target-non-stop setting.  */

static auto_boolean
get_maint_target_non_stop ()
{
  return target_non_stop_enabled;
}

static void
show_maint_target_non_stop (ui_file *file, int from_tty,
			    cmd_list_element *c, const char *value)
{
  if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
    gdb_printf (file,
		_("Whether the target is always in non-stop mode "
		  "is %s (currently %s).\n"), value,
		target_always_non_stop_p () ? "on" : "off");
  else
    gdb_printf (file,
		_("Whether the target is always in non-stop mode "
		  "is %s.\n"), value);
}

/* Temporary copies of permission settings.  */

static bool may_write_registers_1 = true;
static bool may_write_memory_1 = true;
static bool may_insert_breakpoints_1 = true;
static bool may_insert_tracepoints_1 = true;
static bool may_insert_fast_tracepoints_1 = true;
static bool may_stop_1 = true;

/* Make the user-set values match the real values again.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}

/* The one function handles (most of) the permission flags in the same
   way.  */

static void
set_target_permissions (const char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}

/* Set some permissions independently of observer mode.  */

static void
set_write_memory_registers_permission (const char *args, int from_tty,
				       struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  may_write_registers = may_write_registers_1;
  update_observer_mode ();
}
void _initialize_target ();

void
_initialize_target ()
{
  the_debug_target = new debug_target ();

  add_info ("target", info_target_command, targ_desc);
  add_info ("files", info_target_command, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose."),
			     set_targetdebug,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_maint_target_async,
			   get_maint_target_async,
			   show_maint_target_async,
			   &maintenance_set_cmdlist,
			   &maintenance_show_cmdlist);

  add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
				_("\
Set whether gdb always controls the inferior in non-stop mode."), _("\
Show whether gdb always controls the inferior in non-stop mode."), _("\
Tells gdb whether to control the inferior in non-stop mode."),
				set_maint_target_non_stop,
				get_maint_target_non_stop,
				show_maint_target_non_stop,
				&maintenance_set_cmdlist,
				&maintenance_show_cmdlist);

  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_registers_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_registers_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_com ("flash-erase", no_class, flash_erase_command,
	   _("Erase all flash memory regions."));

  add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
			   &auto_connect_native_target, _("\
Set whether GDB may automatically connect to the native target."), _("\
Show whether GDB may automatically connect to the native target."), _("\
When on, and GDB is not connected to a target yet, GDB\n\
attempts \"run\" and other commands with the native target."),
			   NULL, show_auto_connect_native_target,
			   &setlist, &showlist);
}