/* Select target systems and architectures at runtime for GDB.

   Copyright (C) 1990-2024 Free Software Foundation, Inc.

   Contributed by Cygnus Support.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "extract-store-integer.h"
#include "target-dcache.h"
#include "cli/cli-cmds.h"
#include "observable.h"
#include "target-descriptions.h"
#include "gdbthread.h"
#include "inline-frame.h"
#include "tracepoint.h"
#include "gdbsupport/fileio.h"
#include "gdbsupport/agent.h"
#include "target-debug.h"
#include "event-top.h"
#include "gdbsupport/byte-vector.h"
#include "gdbsupport/search.h"
#include <unordered_map>
#include "target-connection.h"
#include "cli/cli-decode.h"
#include "cli/cli-style.h"
[[noreturn]] static void generic_tls_error (void);

static void default_rcmd (struct target_ops *, const char *, struct ui_file *);

static int default_verify_memory (struct target_ops *self,
                                  CORE_ADDR memaddr, ULONGEST size);

[[noreturn]] static void tcomplain (void);
/* Mapping between target_info objects (which have address identity)
   and corresponding open/factory function/callback.  Each add_target
   call adds one entry to this map, and registers a "target
   TARGET_NAME" command that when invoked calls the factory registered
   here.  The target_info object is associated with the command via
   the command's context.  */
static std::unordered_map<const target_info *, target_open_ftype *>
  target_factories;
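
/* Illustrative sketch (not part of the original file): a target backend
   typically pairs a statically-allocated target_info with an open
   callback and registers both through add_target below, which fills in
   this map and creates the "target NAME" command.  The names
   "demo_target_info", "demo_target_open" and "_initialize_demo_target"
   are hypothetical.

     static const target_info demo_target_info = {
       "demo",                          // shortname: "target demo"
       N_("Demo target"),               // longname
       N_("Use the demo target.")       // doc
     };

     static void
     demo_target_open (const char *args, int from_tty)
     {
       // ... create and push a target_ops instance ...
     }

     void
     _initialize_demo_target ()
     {
       add_target (demo_target_info, demo_target_open);
     }
*/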
/* The singleton debug target.  */

static struct target_ops *the_debug_target;

/* Command list for target.  */

static struct cmd_list_element *targetlist = NULL;

bool trust_readonly = false;

/* Nonzero if we should show true memory content including
   memory breakpoints inserted by GDB.  */

static int show_memory_breakpoints = 0;
/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertent disruption, such as in non-stop mode.  */

bool may_write_registers = true;

bool may_write_memory = true;

bool may_insert_breakpoints = true;

bool may_insert_tracepoints = true;

bool may_insert_fast_tracepoints = true;

bool may_stop = true;

/* Non-zero if we want to see trace of target level stuff.  */

static unsigned int targetdebug = 0;

/* Print a "target" debug statement with the function name prefix.  */

#define target_debug_printf(fmt, ...) \
  debug_prefixed_printf_cond (targetdebug > 0, "target", fmt, ##__VA_ARGS__)

/* Print a "target" debug statement without the function name prefix.  */

#define target_debug_printf_nofunc(fmt, ...) \
  debug_prefixed_printf_cond_nofunc (targetdebug > 0, "target", fmt, ##__VA_ARGS__)
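
/* Usage sketch (illustrative; "pid" is a hypothetical local): with
   "set debug target 1" in effect these macros emit prefixed debug
   output and otherwise expand to nothing visible, e.g.

     target_debug_printf ("pid = %d", pid);
     target_debug_printf_nofunc ("-> %s->open (...)", ti->shortname);

   as used by open_target further below.  */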
static void
set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
{
  if (targetdebug)
    current_inferior ()->push_target (the_debug_target);
  else
    current_inferior ()->unpush_target (the_debug_target);
}

static void
show_targetdebug (struct ui_file *file, int from_tty,
                  struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Target debugging is %s.\n"), value);
}
  for (target_ops *t = current_inferior ()->top_target ();
    if (t->has_memory ())

  for (target_ops *t = current_inferior ()->top_target ();

bool
target_has_registers ()
{
  for (target_ops *t = current_inferior ()->top_target ();
       t != nullptr;
       t = t->beneath ())
    if (t->has_registers ())
      return true;

  return false;
}

bool
target_has_execution (inferior *inf)
{
  if (inf == nullptr)
    inf = current_inferior ();

  for (target_ops *t = inf->top_target ();
       t != nullptr;
       t = inf->find_target_beneath (t))
    if (t->has_execution (inf))
      return true;

  return false;
}

  return current_inferior ()->top_target ()->shortname ();
bool
target_attach_no_wait ()
{
  return current_inferior ()->top_target ()->attach_no_wait ();
}

void
target_post_attach (int pid)
{
  return current_inferior ()->top_target ()->post_attach (pid);
}

void
target_prepare_to_store (regcache *regcache)
{
  return current_inferior ()->top_target ()->prepare_to_store (regcache);
}

bool
target_supports_enable_disable_tracepoint ()
{
  target_ops *target = current_inferior ()->top_target ();

  return target->supports_enable_disable_tracepoint ();
}

bool
target_supports_string_tracing ()
{
  return current_inferior ()->top_target ()->supports_string_tracing ();
}

bool
target_supports_evaluation_of_breakpoint_conditions ()
{
  target_ops *target = current_inferior ()->top_target ();

  return target->supports_evaluation_of_breakpoint_conditions ();
}

bool
target_supports_dumpcore ()
{
  return current_inferior ()->top_target ()->supports_dumpcore ();
}

void
target_dumpcore (const char *filename)
{
  return current_inferior ()->top_target ()->dumpcore (filename);
}

bool
target_can_run_breakpoint_commands ()
{
  return current_inferior ()->top_target ()->can_run_breakpoint_commands ();
}

  return current_inferior ()->top_target ()->files_info ();
int
target_insert_fork_catchpoint (int pid)
{
  return current_inferior ()->top_target ()->insert_fork_catchpoint (pid);
}

int
target_remove_fork_catchpoint (int pid)
{
  return current_inferior ()->top_target ()->remove_fork_catchpoint (pid);
}

int
target_insert_vfork_catchpoint (int pid)
{
  return current_inferior ()->top_target ()->insert_vfork_catchpoint (pid);
}

int
target_remove_vfork_catchpoint (int pid)
{
  return current_inferior ()->top_target ()->remove_vfork_catchpoint (pid);
}

int
target_insert_exec_catchpoint (int pid)
{
  return current_inferior ()->top_target ()->insert_exec_catchpoint (pid);
}

int
target_remove_exec_catchpoint (int pid)
{
  return current_inferior ()->top_target ()->remove_exec_catchpoint (pid);
}
int
target_set_syscall_catchpoint (int pid, bool needed, int any_count,
                               gdb::array_view<const int> syscall_counts)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->set_syscall_catchpoint (pid, needed, any_count,
                                         syscall_counts);
}

void
target_rcmd (const char *command, struct ui_file *outbuf)
{
  return current_inferior ()->top_target ()->rcmd (command, outbuf);
}

bool
target_can_lock_scheduler ()
{
  target_ops *target = current_inferior ()->top_target ();

  return (target->get_thread_control_capabilities () & tc_schedlock) != 0;
}
bool
target_can_async_p ()
{
  return target_can_async_p (current_inferior ()->top_target ());
}

bool
target_can_async_p (struct target_ops *target)
{
  if (!target_async_permitted)
    return false;
  return target->can_async_p ();
}

  bool result = current_inferior ()->top_target ()->is_async_p ();
  gdb_assert (target_async_permitted || !result);

enum exec_direction_kind
target_execution_direction ()
{
  return current_inferior ()->top_target ()->execution_direction ();
}
const char *
target_extra_thread_info (thread_info *tp)
{
  return current_inferior ()->top_target ()->extra_thread_info (tp);
}

const char *
target_pid_to_exec_file (int pid)
{
  return current_inferior ()->top_target ()->pid_to_exec_file (pid);
}

struct gdbarch *
target_thread_architecture (ptid_t ptid)
{
  return current_inferior ()->top_target ()->thread_architecture (ptid);
}

int
target_find_memory_regions (find_memory_region_ftype func, void *data)
{
  return current_inferior ()->top_target ()->find_memory_regions (func, data);
}
gdb::unique_xmalloc_ptr<char>
target_make_corefile_notes (bfd *bfd, int *size_p)
{
  return current_inferior ()->top_target ()->make_corefile_notes (bfd, size_p);
}

gdb_byte *
target_get_bookmark (const char *args, int from_tty)
{
  return current_inferior ()->top_target ()->get_bookmark (args, from_tty);
}

void
target_goto_bookmark (const gdb_byte *arg, int from_tty)
{
  return current_inferior ()->top_target ()->goto_bookmark (arg, from_tty);
}
bool
target_stopped_by_watchpoint ()
{
  return current_inferior ()->top_target ()->stopped_by_watchpoint ();
}

bool
target_stopped_by_sw_breakpoint ()
{
  return current_inferior ()->top_target ()->stopped_by_sw_breakpoint ();
}

bool
target_supports_stopped_by_sw_breakpoint ()
{
  target_ops *target = current_inferior ()->top_target ();

  return target->supports_stopped_by_sw_breakpoint ();
}

bool
target_stopped_by_hw_breakpoint ()
{
  return current_inferior ()->top_target ()->stopped_by_hw_breakpoint ();
}

bool
target_supports_stopped_by_hw_breakpoint ()
{
  target_ops *target = current_inferior ()->top_target ();

  return target->supports_stopped_by_hw_breakpoint ();
}

bool
target_have_steppable_watchpoint ()
{
  return current_inferior ()->top_target ()->have_steppable_watchpoint ();
}

int
target_can_use_hardware_watchpoint (bptype type, int cnt, int othertype)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->can_use_hw_breakpoint (type, cnt, othertype);
}

int
target_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->region_ok_for_hw_watchpoint (addr, len);
}

int
target_can_do_single_step ()
{
  return current_inferior ()->top_target ()->can_do_single_step ();
}
int
target_insert_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
                          struct expression *cond)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->insert_watchpoint (addr, len, type, cond);
}

int
target_remove_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
                          struct expression *cond)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->remove_watchpoint (addr, len, type, cond);
}
int
target_insert_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->insert_hw_breakpoint (gdbarch, bp_tgt);
}

int
target_remove_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->remove_hw_breakpoint (gdbarch, bp_tgt);
}
bool
target_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int type,
                                       struct expression *cond)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->can_accel_watchpoint_condition (addr, len, type, cond);
}

bool
target_can_execute_reverse ()
{
  return current_inferior ()->top_target ()->can_execute_reverse ();
}

ptid_t
target_get_ada_task_ptid (long lwp, ULONGEST tid)
{
  return current_inferior ()->top_target ()->get_ada_task_ptid (lwp, tid);
}

bool
target_filesystem_is_local ()
{
  return current_inferior ()->top_target ()->filesystem_is_local ();
}
  return current_inferior ()->top_target ()->trace_init ();

void
target_download_tracepoint (bp_location *location)
{
  return current_inferior ()->top_target ()->download_tracepoint (location);
}

bool
target_can_download_tracepoint ()
{
  return current_inferior ()->top_target ()->can_download_tracepoint ();
}

void
target_download_trace_state_variable (const trace_state_variable &tsv)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->download_trace_state_variable (tsv);
}

void
target_enable_tracepoint (bp_location *loc)
{
  return current_inferior ()->top_target ()->enable_tracepoint (loc);
}

void
target_disable_tracepoint (bp_location *loc)
{
  return current_inferior ()->top_target ()->disable_tracepoint (loc);
}

void
target_trace_start ()
{
  return current_inferior ()->top_target ()->trace_start ();
}

void
target_trace_set_readonly_regions ()
{
  return current_inferior ()->top_target ()->trace_set_readonly_regions ();
}

int
target_get_trace_status (trace_status *ts)
{
  return current_inferior ()->top_target ()->get_trace_status (ts);
}

void
target_get_tracepoint_status (tracepoint *tp, uploaded_tp *utp)
{
  return current_inferior ()->top_target ()->get_tracepoint_status (tp, utp);
}

  return current_inferior ()->top_target ()->trace_stop ();
int
target_trace_find (trace_find_type type, int num,
                   CORE_ADDR addr1, CORE_ADDR addr2, int *tpp)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->trace_find (type, num, addr1, addr2, tpp);
}

bool
target_get_trace_state_variable_value (int tsv, LONGEST *val)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->get_trace_state_variable_value (tsv, val);
}

int
target_save_trace_data (const char *filename)
{
  return current_inferior ()->top_target ()->save_trace_data (filename);
}

int
target_upload_tracepoints (uploaded_tp **utpp)
{
  return current_inferior ()->top_target ()->upload_tracepoints (utpp);
}

int
target_upload_trace_state_variables (uploaded_tsv **utsvp)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->upload_trace_state_variables (utsvp);
}

LONGEST
target_get_raw_trace_data (gdb_byte *buf, ULONGEST offset, LONGEST len)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->get_raw_trace_data (buf, offset, len);
}

int
target_get_min_fast_tracepoint_insn_len ()
{
  target_ops *target = current_inferior ()->top_target ();

  return target->get_min_fast_tracepoint_insn_len ();
}
void
target_set_disconnected_tracing (int val)
{
  return current_inferior ()->top_target ()->set_disconnected_tracing (val);
}

void
target_set_circular_trace_buffer (int val)
{
  return current_inferior ()->top_target ()->set_circular_trace_buffer (val);
}

void
target_set_trace_buffer_size (LONGEST val)
{
  return current_inferior ()->top_target ()->set_trace_buffer_size (val);
}

bool
target_set_trace_notes (const char *user, const char *notes,
                        const char *stopnotes)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->set_trace_notes (user, notes, stopnotes);
}

bool
target_get_tib_address (ptid_t ptid, CORE_ADDR *addr)
{
  return current_inferior ()->top_target ()->get_tib_address (ptid, addr);
}

void
target_set_permissions ()
{
  return current_inferior ()->top_target ()->set_permissions ();
}
bool
target_static_tracepoint_marker_at (CORE_ADDR addr,
                                    static_tracepoint_marker *marker)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->static_tracepoint_marker_at (addr, marker);
}

std::vector<static_tracepoint_marker>
target_static_tracepoint_markers_by_strid (const char *marker_id)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->static_tracepoint_markers_by_strid (marker_id);
}

traceframe_info_up
target_traceframe_info ()
{
  return current_inferior ()->top_target ()->traceframe_info ();
}

bool
target_use_agent (bool use)
{
  return current_inferior ()->top_target ()->use_agent (use);
}

bool
target_can_use_agent ()
{
  return current_inferior ()->top_target ()->can_use_agent ();
}

bool
target_augmented_libraries_svr4_read ()
{
  return current_inferior ()->top_target ()->augmented_libraries_svr4_read ();
}

bool
target_supports_memory_tagging ()
{
  return current_inferior ()->top_target ()->supports_memory_tagging ();
}
bool
target_fetch_memtags (CORE_ADDR address, size_t len, gdb::byte_vector &tags,
                      int type)
{
  return current_inferior ()->top_target ()->fetch_memtags (address, len,
                                                            tags, type);
}

bool
target_store_memtags (CORE_ADDR address, size_t len,
                      const gdb::byte_vector &tags, int type)
{
  return current_inferior ()->top_target ()->store_memtags (address, len,
                                                            tags, type);
}

bool
target_is_address_tagged (gdbarch *gdbarch, CORE_ADDR address)
{
  return current_inferior ()->top_target ()->is_address_tagged (gdbarch,
                                                                address);
}

x86_xsave_layout
target_fetch_x86_xsave_layout ()
{
  return current_inferior ()->top_target ()->fetch_x86_xsave_layout ();
}

void
target_log_command (const char *p)
{
  return current_inferior ()->top_target ()->log_command (p);
}
/* This is used to implement the various target commands.  */

static void
open_target (const char *args, int from_tty, struct cmd_list_element *command)
{
  auto *ti = static_cast<target_info *> (command->context ());
  target_open_ftype *func = target_factories[ti];

  target_debug_printf_nofunc ("-> %s->open (...)", ti->shortname);

  func (args, from_tty);

  target_debug_printf_nofunc ("<- %s->open (%s, %d)", ti->shortname, args,
                              from_tty);
}
void
add_target (const target_info &t, target_open_ftype *func,
            completer_ftype *completer)
{
  struct cmd_list_element *c;

  auto &func_slot = target_factories[&t];
  if (func_slot != nullptr)
    internal_error (_("target already added (\"%s\")."), t.shortname);
  func_slot = func;

  if (targetlist == NULL)
    add_basic_prefix_cmd ("target", class_run, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol.  For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
                          &targetlist, 0, &cmdlist);
  c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
  c->set_context ((void *) &t);
  c->func = open_target;
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
void
add_deprecated_target_alias (const target_info &tinfo, const char *alias)
{
  struct cmd_list_element *c;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning.  */
  c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
  c->func = open_target;
  c->set_context ((void *) &tinfo);
  gdb::unique_xmalloc_ptr<char> alt
    = xstrprintf ("target %s", tinfo.shortname);
  deprecate_cmd (c, alt.release ());
}
  /* If the commit_resume_state of the to-be-killed-inferior's process stratum
     is true, and this inferior is the last live inferior with resumed threads
     of that target, then we want to leave commit_resume_state to false, as the
     target won't have any resumed threads anymore.  We achieve this with
     this scoped_disable_commit_resumed.  On construction, it will set the flag
     to false.  On destruction, it will only set it to true if there are resumed
     threads.  */
  scoped_disable_commit_resumed disable ("killing");
  current_inferior ()->top_target ()->kill ();

void
target_load (const char *arg, int from_tty)
{
  target_dcache_invalidate (current_program_space->aspace);
  current_inferior ()->top_target ()->load (arg, from_tty);
}
target_terminal_state target_terminal::m_terminal_state
  = target_terminal_state::is_ours;

/* See target/target.h.  */

void
target_terminal::init (void)
{
  current_inferior ()->top_target ()->terminal_init ();

  m_terminal_state = target_terminal_state::is_ours;
}
/* See target/target.h.  */

void
target_terminal::inferior (void)
{
  struct ui *ui = current_ui;

  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  */
  if (ui->prompt_state != PROMPT_BLOCKED)
    return;

  /* Since we always run the inferior in the main console (unless "set
     inferior-tty" is in effect), when some UI other than the main one
     calls target_terminal::inferior, then we leave the main UI's
     terminal settings as is.  */
  if (ui != main_ui)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */

  struct inferior *inf = current_inferior ();

  if (inf->terminal_state != target_terminal_state::is_inferior)
    {
      current_inferior ()->top_target ()->terminal_inferior ();
      inf->terminal_state = target_terminal_state::is_inferior;
    }

  m_terminal_state = target_terminal_state::is_inferior;

  /* If the user hit C-c before, pretend that it was hit right
     here.  */
  if (check_quit_flag ())
    target_pass_ctrlc ();
}
/* See target/target.h.  */

void
target_terminal::restore_inferior (void)
{
  struct ui *ui = current_ui;

  /* See target_terminal::inferior().  */
  if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
    return;

  /* Restore the terminal settings of inferiors that were in the
     foreground but are now ours_for_output due to a temporary
     target_target::ours_for_output() call.  */

  {
    scoped_restore_current_inferior restore_inferior;

    for (::inferior *inf : all_inferiors ())
      {
        if (inf->terminal_state == target_terminal_state::is_ours_for_output)
          {
            set_current_inferior (inf);
            current_inferior ()->top_target ()->terminal_inferior ();
            inf->terminal_state = target_terminal_state::is_inferior;
          }
      }
  }

  m_terminal_state = target_terminal_state::is_inferior;

  /* If the user hit C-c before, pretend that it was hit right
     here.  */
  if (check_quit_flag ())
    target_pass_ctrlc ();
}
/* Switch terminal state to DESIRED_STATE, either is_ours, or
   is_ours_for_output.  */

static void
target_terminal_is_ours_kind (target_terminal_state desired_state)
{
  scoped_restore_current_inferior restore_inferior;

  /* Must do this in two passes.  First, have all inferiors save the
     current terminal settings.  Then, after all inferiors have had a
     chance to safely save the terminal settings, restore GDB's
     terminal settings.  */

  for (inferior *inf : all_inferiors ())
    {
      if (inf->terminal_state == target_terminal_state::is_inferior)
        {
          set_current_inferior (inf);
          current_inferior ()->top_target ()->terminal_save_inferior ();
        }
    }

  for (inferior *inf : all_inferiors ())
    {
      /* Note we don't check is_inferior here like above because we
         need to handle 'is_ours_for_output -> is_ours' too.  Careful
         to never transition from 'is_ours' to 'is_ours_for_output'.  */
      if (inf->terminal_state != target_terminal_state::is_ours
          && inf->terminal_state != desired_state)
        {
          set_current_inferior (inf);
          if (desired_state == target_terminal_state::is_ours)
            current_inferior ()->top_target ()->terminal_ours ();
          else if (desired_state == target_terminal_state::is_ours_for_output)
            current_inferior ()->top_target ()->terminal_ours_for_output ();
          else
            gdb_assert_not_reached ("unhandled desired state");
          inf->terminal_state = desired_state;
        }
    }
}
/* See target/target.h.  */

void
target_terminal::ours ()
{
  struct ui *ui = current_ui;

  /* See target_terminal::inferior.  */
  if (ui != main_ui)
    return;

  if (m_terminal_state == target_terminal_state::is_ours)
    return;

  target_terminal_is_ours_kind (target_terminal_state::is_ours);
  m_terminal_state = target_terminal_state::is_ours;
}

/* See target/target.h.  */

void
target_terminal::ours_for_output ()
{
  struct ui *ui = current_ui;

  /* See target_terminal::inferior.  */
  if (ui != main_ui)
    return;

  if (!target_terminal::is_inferior ())
    return;

  target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
  target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
}

/* See target/target.h.  */

void
target_terminal::info (const char *arg, int from_tty)
{
  current_inferior ()->top_target ()->terminal_info (arg, from_tty);
}
bool
target_supports_terminal_ours (void)
{
  /* The current top target is the target at the top of the target
     stack of the current inferior.  While normally there's always an
     inferior, we must check for nullptr here because we can get here
     very early during startup, before the initial inferior is first
     created.  */
  inferior *inf = current_inferior ();

  if (inf == nullptr)
    return false;
  return inf->top_target ()->supports_terminal_ours ();
}

  error (_("You can't do that when your target is `%s'"),
         current_inferior ()->top_target ()->shortname ());

  error (_("You can't do that without a process to debug."));
static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  gdb_printf (_("No saved terminal information.\n"));
}

/* A default implementation for the to_get_ada_task_ptid target method.

   This function builds the PTID by using both LWP and TID as part of
   the PTID lwp and tid elements.  The pid used is the pid of the
   inferior_ptid.  */

static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, ULONGEST tid)
{
  return ptid_t (inferior_ptid.pid (), lwp, tid);
}
static enum exec_direction_kind
default_execution_direction (struct target_ops *self)
{
  if (!target_can_execute_reverse ())
    return EXEC_FORWARD;
  else if (!target_can_async_p ())
    return EXEC_FORWARD;
  else
    gdb_assert_not_reached ("\
to_execution_direction must be implemented for reverse async");
}
void
target_ops_ref_policy::decref (target_ops *t)
{
  t->decref ();
  if (t->refcount () == 0)
    {
      if (t->stratum () == process_stratum)
        connection_list_remove (as_process_stratum_target (t));

      for (inferior *inf : all_inferiors ())
        gdb_assert (!inf->target_is_pushed (t));

      fileio_handles_invalidate_target (t);

      target_debug_printf_nofunc ("closing target");

      t->close ();
    }
}
void
target_stack::push (target_ops *t)
{
  /* We must create a new reference first.  It is possible that T is
     already pushed on this target stack, in which case we will first
     unpush it below, before re-pushing it.  If we don't increment the
     reference count now, then when we unpush it, we might end up deleting
     T, which is not good.  */
  auto ref = target_ops_ref::new_reference (t);

  strata stratum = t->stratum ();

  /* If there's already a target at this stratum, remove it.  */

  if (m_stack[stratum].get () != nullptr)
    unpush (m_stack[stratum].get ());

  /* Now add the new one.  */
  m_stack[stratum] = std::move (ref);

  if (m_top < stratum)
    m_top = stratum;

  if (stratum == process_stratum)
    connection_list_add (as_process_stratum_target (t));
}
bool
target_stack::unpush (target_ops *t)
{
  gdb_assert (t != NULL);

  strata stratum = t->stratum ();

  if (stratum == dummy_stratum)
    internal_error (_("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that a target can only occur
     once in the target stack.  */
  if (m_stack[stratum] != t)
    {
      /* If T wasn't pushed, quit.  Only open targets should be
         closed.  */
      return false;
    }

  if (m_top == stratum)
    m_top = this->find_beneath (t)->stratum ();

  /* Move the target reference off the target stack, this sets the pointer
     held in m_stack to nullptr, and places the reference in ref.  When
     ref goes out of scope its reference count will be decremented, which
     might cause the target to close.

     We have to do it this way, and not just set the value in m_stack to
     nullptr directly, because doing so would decrement the reference
     count first, which might close the target, and closing the target
     does a check that the target is not on any inferiors target_stack.  */
  auto ref = std::move (m_stack[stratum]);

  return true;
}
void
target_unpusher::operator() (struct target_ops *ops) const
{
  current_inferior ()->unpush_target (ops);
}

/* Default implementation of to_get_thread_local_address.  */

static void
generic_tls_error (void)
{
  throw_error (TLS_GENERIC_ERROR,
               _("Cannot find thread-local variables on this target"));
}
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  */

CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  volatile CORE_ADDR addr = 0;
  struct target_ops *target = current_inferior ()->top_target ();
  gdbarch *gdbarch = current_inferior ()->arch ();

  /* If OBJFILE is a separate debug object file, look for the
     original object file.  */
  if (objfile->separate_debug_objfile_backlink != NULL)
    objfile = objfile->separate_debug_objfile_backlink;

  if (gdbarch_fetch_tls_load_module_address_p (gdbarch))
    {
      ptid_t ptid = inferior_ptid;

      try
        {
          CORE_ADDR lm_addr;

          /* Fetch the load module address for this objfile.  */
          lm_addr = gdbarch_fetch_tls_load_module_address (gdbarch,
                                                           objfile);

          if (gdbarch_get_thread_local_address_p (gdbarch))
            addr = gdbarch_get_thread_local_address (gdbarch, ptid, lm_addr,
                                                     offset);
          else
            addr = target->get_thread_local_address (ptid, lm_addr, offset);
        }
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      catch (const gdb_exception &ex)
        {
          int objfile_is_library = (objfile->flags & OBJF_SHARED);

          switch (ex.error)
            {
            case TLS_NO_LIBRARY_SUPPORT_ERROR:
              error (_("Cannot find thread-local variables "
                       "in this thread library."));
              break;
            case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
              if (objfile_is_library)
                error (_("Cannot find shared library `%s' in dynamic"
                         " linker's load module list"), objfile_name (objfile));
              else
                error (_("Cannot find executable file `%s' in dynamic"
                         " linker's load module list"), objfile_name (objfile));
              break;
            case TLS_NOT_ALLOCATED_YET_ERROR:
              if (objfile_is_library)
                error (_("The inferior has not yet allocated storage for"
                         " thread-local variables in\n"
                         "the shared library `%s'\n"
                         "for %s"),
                       objfile_name (objfile),
                       target_pid_to_str (ptid).c_str ());
              else
                error (_("The inferior has not yet allocated storage for"
                         " thread-local variables in\n"
                         "the executable `%s'\n"
                         "for %s"),
                       objfile_name (objfile),
                       target_pid_to_str (ptid).c_str ());
              break;
            case TLS_GENERIC_ERROR:
              if (objfile_is_library)
                error (_("Cannot find thread-local storage for %s, "
                         "shared library %s:\n%s"),
                       target_pid_to_str (ptid).c_str (),
                       objfile_name (objfile), ex.what ());
              else
                error (_("Cannot find thread-local storage for %s, "
                         "executable file %s:\n%s"),
                       target_pid_to_str (ptid).c_str (),
                       objfile_name (objfile), ex.what ());
              break;
            default:
              throw;
            }
        }
    }
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
const char *
target_xfer_status_to_string (enum target_xfer_status status)
{
#define CASE(X) case X: return #X
  switch (status)
    {
      CASE(TARGET_XFER_E_IO);
      CASE(TARGET_XFER_UNAVAILABLE);
    }
#undef CASE
}
const std::vector<target_section> *
target_get_section_table (struct target_ops *target)
{
  return target->get_section_table ();
}

/* Find a section containing ADDR.  */

const struct target_section *
target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
{
  const std::vector<target_section> *table = target_get_section_table (target);

  if (table == nullptr)
    return nullptr;

  for (const target_section &secp : *table)
    {
      if (addr >= secp.addr && addr < secp.endaddr)
        return &secp;
    }
  return nullptr;
}

const std::vector<target_section> *
default_get_section_table ()
{
  return &current_program_space->target_sections ();
}
/* Helper for the memory xfer routines.  Checks the attributes of the
   memory region of MEMADDR against the read or write being attempted.
   If the access is permitted returns true, otherwise returns false.
   REGION_P is an optional output parameter.  If not-NULL, it is
   filled with a pointer to the memory region of MEMADDR.  REG_LEN
   returns LEN trimmed to the end of the region.  This is how much the
   caller can continue requesting, if the access is permitted.  A
   single xfer request must not straddle memory region boundaries.  */

static bool
memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
                          ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
                          struct mem_region **region_p)
{
  struct mem_region *region;

  region = lookup_mem_region (memaddr);

  if (region_p != NULL)
    *region_p = region;

  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
        return false;
      break;

    case MEM_WO:
      if (readbuf != NULL)
        return false;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
        error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return false;
    }

  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    *reg_len = len;
  else
    *reg_len = region->hi - memaddr;

  return true;
}
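
/* Worked example (illustrative, not from the original sources): for a
   readable/writable region spanning [0x1000, 0x2000), a read of 0x1000
   bytes starting at MEMADDR 0x1800 is permitted, but *REG_LEN is trimmed
   to 0x800 (region->hi - memaddr) so that the caller's transfer does not
   straddle the region boundary.  */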
/* Read memory from more than one valid target.  A core file, for
   instance, could have some of memory but delegate other bits to
   the target below it.  So, we must manually try all targets.  */

enum target_xfer_status
raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
                         const gdb_byte *writebuf, ULONGEST memaddr,
                         LONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;

  do
    {
      res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
                               readbuf, writebuf, memaddr, len,
                               xfered_len);
      if (res == TARGET_XFER_OK)
        break;

      /* Stop if the target reports that the memory is not available.  */
      if (res == TARGET_XFER_UNAVAILABLE)
        break;

      /* Don't continue past targets which have all the memory.
         At one time, this code was necessary to read data from
         executables / shared libraries when data for the requested
         addresses weren't available in the core file.  But now the
         core target handles this case itself.  */
      if (ops->has_all_memory ())
        break;

      ops = ops->beneath ();
    }
  while (ops != NULL);

  /* The cache works at the raw memory level.  Make sure the cache
     gets updated with raw contents no matter what kind of memory
     object was originally being written.  Note we do write-through
     first, so that if it fails, we don't write to the cache contents
     that never made it to the target.  */
  if (writebuf != NULL
      && inferior_ptid != null_ptid
      && target_dcache_init_p (current_program_space->aspace)
      && (stack_cache_enabled_p () || code_cache_enabled_p ()))
    {
      DCACHE *dcache = target_dcache_get (current_program_space->aspace);

      /* Note that writing to an area of memory which wasn't present
         in the cache doesn't cause it to be loaded in.  */
      dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
    }

  return res;
}
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
                       gdb_byte *readbuf, const gdb_byte *writebuf,
                       ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  ULONGEST reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
        {
          const std::vector<target_section> *table
            = target_get_section_table (ops);
          const char *section_name = section->the_bfd_section->name;

          memaddr = overlay_mapped_address (memaddr, section);

          auto match_cb = [=] (const struct target_section *s)
            {
              return (strcmp (section_name, s->the_bfd_section->name) == 0);
            };

          return section_table_xfer_memory_partial (readbuf, writebuf,
                                                    memaddr, len, xfered_len,
                                                    *table, match_cb);
        }
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      const struct target_section *secp
        = target_section_by_addr (ops, memaddr);
      if (secp != NULL
          && (bfd_section_flags (secp->the_bfd_section) & SEC_READONLY))
        {
          const std::vector<target_section> *table
            = target_get_section_table (ops);
          return section_table_xfer_memory_partial (readbuf, writebuf,
                                                    memaddr, len, xfered_len,
                                                    *table);
        }
    }

  /* Try GDB's internal data cache.  */

  if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
                                 &region))
    return TARGET_XFER_E_IO;

  if (inferior_ptid != null_ptid)
    inf = current_inferior ();
  else
    inf = NULL;

  if (inf != NULL
      && readbuf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
         with reading from a trace buffer, because reading outside of
         the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
          || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
          || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache
        = target_dcache_get_or_init (current_program_space->aspace);

      return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
                                         reg_len, xfered_len);
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
                                 xfered_len);

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
/* Perform a partial memory transfer.  For docs see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_partial (struct target_ops *ops, enum target_object object,
                     gdb_byte *readbuf, const gdb_byte *writebuf,
                     ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;

  /* Zero length requests are ok and require no work.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  memaddr = gdbarch_remove_non_address_bits (current_inferior ()->arch (),
                                             memaddr);

  /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
     breakpoint insns, thus hiding out from higher layers whether
     there are software breakpoints inserted in the code stream.  */
  if (readbuf != NULL)
    {
      res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
                                   xfered_len);

      if (res == TARGET_XFER_OK && !show_memory_breakpoints)
        breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
    }
  else
    {
      /* A large write request is likely to be partially satisfied
         by memory_xfer_partial_1.  We will continually malloc
         and free a copy of the entire write request for breakpoint
         shadow handling even though we only end up writing a small
         subset of it.  Cap writes to a limit specified by the target
         to mitigate this.  */
      len = std::min (ops->get_memory_xfer_limit (), len);

      gdb::byte_vector buf (writebuf, writebuf + len);
      breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
      res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr,
                                   len, xfered_len);
    }

  return res;
}
scoped_restore_tmpl<int>
make_scoped_restore_show_memory_breakpoints (int show)
{
  return make_scoped_restore (&show_memory_breakpoints, show);
}
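
/* Usage sketch (illustrative, not from the original sources): callers
   that need to see or write the raw contents under software breakpoints
   temporarily flip the flag for the duration of a scope, e.g.

     {
       scoped_restore restore
         = make_scoped_restore_show_memory_breakpoints (1);
       target_write_memory (addr, buf, len);
     }

   after which show_memory_breakpoints reverts automatically.  */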
/* For docs see target.h, to_xfer_partial.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
                     enum target_object object, const char *annex,
                     gdb_byte *readbuf, const gdb_byte *writebuf,
                     ULONGEST offset, ULONGEST len,
                     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
           core_addr_to_string_nz (offset), plongest (len));

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
                                  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Skip/avoid accessing the target if the memory region
         attributes block the access.  Check this here instead of in
         raw_memory_xfer_partial as otherwise we'd end up checking
         this twice in the case of the memory_xfer_partial path is
         taken; once before checking the dcache, and another in the
         tail call to raw_memory_xfer_partial.  */
      if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
                                     NULL))
        return TARGET_XFER_E_IO;

      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
                                        xfered_len);
    }
  else
    retval = ops->xfer_partial (object, annex, readbuf,
                                writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      std::string s
        = string_printf ("%s:target_xfer_partial "
                         "(%d, %s, %s, %s, %s, %s) = %d, %s",
                         ops->shortname (), (int) object,
                         (annex ? annex : "(null)"),
                         host_address_to_string (readbuf),
                         host_address_to_string (writebuf),
                         core_addr_to_string_nz (offset), pulongest (len),
                         retval, pulongest (*xfered_len));

      if (readbuf)
        myaddr = readbuf;
      if (writebuf)
        myaddr = writebuf;

      if (retval == TARGET_XFER_OK && myaddr != NULL)
        {
          int i;

          string_appendf (s, ", bytes =");
          for (i = 0; i < *xfered_len; i++)
            {
              if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
                {
                  if (targetdebug < 2 && i > 0)
                    {
                      string_appendf (s, " ...");
                      break;
                    }

                  target_debug_printf_nofunc ("%s", s.c_str ());
                }

              string_appendf (s, " %02x", myaddr[i] & 0xff);
            }
        }

      target_debug_printf_nofunc ("%s", s.c_str ());
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
/* Read LEN bytes of target memory at address MEMADDR, placing the
   results in GDB's memory at MYADDR.  Returns either 0 for success or
   -1 if any error occurs.

   If an error occurs, no guarantee is made about the contents of the data at
   MYADDR.  In particular, the caller should not depend upon partial reads
   filling the buffer with good data.  There is no way for the caller to know
   how much good data might have been transferred anyway.  Callers that can
   deal with partial reads should call target_read (which will retry until
   it makes no progress, and then return how much was transferred).  */

int
target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  if (target_read (current_inferior ()->top_target (),
                   TARGET_OBJECT_MEMORY, NULL,
                   myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}
/* See target/target.h.  */

int
target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
{
  gdb_byte buf[4];
  int r;

  r = target_read_memory (memaddr, buf, sizeof buf);
  if (r != 0)
    return r;
  *result = extract_unsigned_integer
              (buf, sizeof buf,
               gdbarch_byte_order (current_inferior ()->arch ()));
  return 0;
}
/* Like target_read_memory, but specify explicitly that this is a read
   from the target's raw memory.  That is, this read bypasses the
   dcache, breakpoint shadowing, etc.  */

int
target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  if (target_read (current_inferior ()->top_target (),
                   TARGET_OBJECT_RAW_MEMORY, NULL,
                   myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}

/* Like target_read_memory, but specify explicitly that this is a read from
   the target's stack.  This may trigger different cache behavior.  */

int
target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  if (target_read (current_inferior ()->top_target (),
                   TARGET_OBJECT_STACK_MEMORY, NULL,
                   myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}

/* Like target_read_memory, but specify explicitly that this is a read from
   the target's code.  This may trigger different cache behavior.  */

int
target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  if (target_read (current_inferior ()->top_target (),
                   TARGET_OBJECT_CODE_MEMORY, NULL,
                   myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}
/* Write LEN bytes from MYADDR to target memory at address MEMADDR.
   Returns either 0 for success or -1 if any error occurs.  If an
   error occurs, no guarantee is made about how much data got written.
   Callers that can deal with partial writes should call
   target_write.  */

int
target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
{
  if (target_write (current_inferior ()->top_target (),
                    TARGET_OBJECT_MEMORY, NULL,
                    myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}

/* Write LEN bytes from MYADDR to target raw memory at address
   MEMADDR.  Returns either 0 for success or -1 if any error occurs.
   If an error occurs, no guarantee is made about how much data got
   written.  Callers that can deal with partial writes should call
   target_write.  */

int
target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr,
                         ssize_t len)
{
  if (target_write (current_inferior ()->top_target (),
                    TARGET_OBJECT_RAW_MEMORY, NULL,
                    myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}
/* Fetch the target's memory map.  */

std::vector<mem_region>
target_memory_map (void)
{
  target_ops *target = current_inferior ()->top_target ();
  std::vector<mem_region> result = target->memory_map ();
  if (result.empty ())
    return result;

  std::sort (result.begin (), result.end ());

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  mem_region *last_one = NULL;
  for (size_t ix = 0; ix < result.size (); ix++)
    {
      mem_region *this_one = &result[ix];
      this_one->number = ix;

      if (last_one != NULL && last_one->hi > this_one->lo)
        {
          warning (_("Overlapping regions in memory map: ignoring"));
          return std::vector<mem_region> ();
        }

      last_one = this_one;
    }

  return result;
}
void
target_flash_erase (ULONGEST address, LONGEST length)
{
  current_inferior ()->top_target ()->flash_erase (address, length);
}

void
target_flash_done (void)
{
  current_inferior ()->top_target ()->flash_done ();
}

static void
show_trust_readonly (struct ui_file *file, int from_tty,
                     struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
              _("Mode for reading from readonly sections is %s.\n"),
              value);
}
/* Target vector read/write partial wrapper functions.  */

static enum target_xfer_status
target_read_partial (struct target_ops *ops,
                     enum target_object object,
                     const char *annex, gdb_byte *buf,
                     ULONGEST offset, ULONGEST len,
                     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
                              xfered_len);
}

static enum target_xfer_status
target_write_partial (struct target_ops *ops,
                      enum target_object object,
                      const char *annex, const gdb_byte *buf,
                      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
                              xfered_len);
}
/* Wrappers to perform the full transfer.  */

/* For docs on target_read see target.h.  */

LONGEST
target_read (struct target_ops *ops,
             enum target_object object,
             const char *annex, gdb_byte *buf,
             ULONGEST offset, LONGEST len)
{
  LONGEST xfered_total = 0;
  int unit_size = 1;

  /* If we are reading from a memory object, find the length of an addressable
     unit for that architecture.  */
  if (object == TARGET_OBJECT_MEMORY
      || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY
      || object == TARGET_OBJECT_RAW_MEMORY)
    unit_size = gdbarch_addressable_memory_unit_size
      (current_inferior ()->arch ());

  while (xfered_total < len)
    {
      ULONGEST xfered_partial;
      enum target_xfer_status status;

      status = target_read_partial (ops, object, annex,
                                    buf + xfered_total * unit_size,
                                    offset + xfered_total, len - xfered_total,
                                    &xfered_partial);

      /* Call an observer, notifying them of the xfer progress?  */
      if (status == TARGET_XFER_EOF)
        return xfered_total;
      else if (status == TARGET_XFER_OK)
        xfered_total += xfered_partial;
      else
        return TARGET_XFER_E_IO;
    }

  return len;
}
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning, or end, nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and it seems unacceptable solution.  Explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
                           const ULONGEST begin, const ULONGEST end,
                           int unit_size,
                           std::vector<memory_read_result> *result)
{
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    return;

  gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                           buf.get (), begin, 1,
                           &xfered_len) == TARGET_XFER_OK)
    forward = 1;
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                                buf.get () + (end - begin) - 1, end - 1, 1,
                                &xfered_len) == TARGET_XFER_OK)
    forward = 0;
  else
    return;

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin) / 2;

      if (forward)
        {
          first_half_begin = current_begin;
          first_half_end = middle;
          second_half_begin = middle;
          second_half_end = current_end;
        }
      else
        {
          first_half_begin = middle;
          first_half_end = current_end;
          second_half_begin = current_begin;
          second_half_end = middle;
        }

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                          buf.get () + (first_half_begin - begin) * unit_size,
                          first_half_begin,
                          first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
        {
          /* This half reads up fine.  So, the error must be in the
             other half.  */
          current_begin = second_half_begin;
          current_end = second_half_end;
        }
      else
        {
          /* This half is not readable.  Because we've tried one byte, we
             know some part of this half is actually readable.  Go to the next
             iteration to divide again and try to read.

             We don't handle the other half, because this function only tries
             to read a single readable subrange.  */
          current_begin = first_half_begin;
          current_end = first_half_end;
        }
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  */
      result->emplace_back (begin, current_end, std::move (buf));
    }
  else
    {
      /* The [current_end, end) range has been read.  */
      LONGEST region_len = end - current_end;

      gdb::unique_xmalloc_ptr<gdb_byte> data
        ((gdb_byte *) xmalloc (region_len * unit_size));
      memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
              region_len * unit_size);
      result->emplace_back (current_end, end, std::move (data));
    }
}
<memory_read_result
>
2118 read_memory_robust (struct target_ops
*ops
,
2119 const ULONGEST offset
, const LONGEST len
)
2121 std::vector
<memory_read_result
> result
;
2123 = gdbarch_addressable_memory_unit_size (current_inferior ()->arch ());
2125 LONGEST xfered_total
= 0;
2126 while (xfered_total
< len
)
2128 struct mem_region
*region
= lookup_mem_region (offset
+ xfered_total
);
2131 /* If there is no explicit region, a fake one should be created. */
2132 gdb_assert (region
);
2134 if (region
->hi
== 0)
2135 region_len
= len
- xfered_total
;
2137 region_len
= region
->hi
- offset
;
2139 if (region
->attrib
.mode
== MEM_NONE
|| region
->attrib
.mode
== MEM_WO
)
2141 /* Cannot read this region. Note that we can end up here only
2142 if the region is explicitly marked inaccessible, or
2143 'inaccessible-by-default' is in effect. */
2144 xfered_total
+= region_len
;
2148 LONGEST to_read
= std::min (len
- xfered_total
, region_len
);
2149 gdb::unique_xmalloc_ptr
<gdb_byte
> buffer
2150 ((gdb_byte
*) xmalloc (to_read
* unit_size
));
2152 LONGEST xfered_partial
=
2153 target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
, buffer
.get (),
2154 offset
+ xfered_total
, to_read
);
2155 /* Call an observer, notifying them of the xfer progress? */
2156 if (xfered_partial
<= 0)
2158 /* Got an error reading full chunk. See if maybe we can read
2160 read_whatever_is_readable (ops
, offset
+ xfered_total
,
2161 offset
+ xfered_total
+ to_read
,
2162 unit_size
, &result
);
2163 xfered_total
+= to_read
;
2167 result
.emplace_back (offset
+ xfered_total
,
2168 offset
+ xfered_total
+ xfered_partial
,
2169 std::move (buffer
));
2170 xfered_total
+= xfered_partial
;
/* An alternative to target_write with progress callbacks.  */

LONGEST
target_write_with_progress (struct target_ops *ops,
                            enum target_object object,
                            const char *annex, const gdb_byte *buf,
                            ULONGEST offset, LONGEST len,
                            void (*progress) (ULONGEST, void *), void *baton)
{
  LONGEST xfered_total = 0;
  int unit_size = 1;

  /* If we are writing to a memory object, find the length of an addressable
     unit for that architecture.  */
  if (object == TARGET_OBJECT_MEMORY
      || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY
      || object == TARGET_OBJECT_RAW_MEMORY)
    unit_size = gdbarch_addressable_memory_unit_size
      (current_inferior ()->arch ());

  /* Give the progress callback a chance to set up.  */
  if (progress)
    (*progress) (0, baton);

  while (xfered_total < len)
    {
      ULONGEST xfered_partial;
      enum target_xfer_status status;

      status = target_write_partial (ops, object, annex,
                                     buf + xfered_total * unit_size,
                                     offset + xfered_total, len - xfered_total,
                                     &xfered_partial);

      if (status != TARGET_XFER_OK)
        return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;

      if (progress)
        (*progress) (xfered_partial, baton);

      xfered_total += xfered_partial;
    }

  return len;
}
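
/* Usage sketch (illustrative; "note_progress" and "total" are
   hypothetical names): a caller can track how many units have been
   written so far, e.g.

     static void
     note_progress (ULONGEST written, void *baton)
     {
       *(ULONGEST *) baton += written;
     }

     ULONGEST total = 0;
     target_write_with_progress (ops, TARGET_OBJECT_MEMORY, NULL, buf,
                                 addr, len, note_progress, &total);

   The callback is invoked once with 0 before the transfer starts and
   then once with the size of each partial write.  */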
/* For docs on target_write see target.h.  */

LONGEST
target_write (struct target_ops *ops,
              enum target_object object,
              const char *annex, const gdb_byte *buf,
              ULONGEST offset, LONGEST len)
{
  return target_write_with_progress (ops, object, annex, buf, offset, len,
                                     NULL, NULL);
}
/* Help for target_read_alloc and target_read_stralloc.  See their comments
   for details.  */

template <typename T>
std::optional<gdb::def_vector<T>>
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
                     const char *annex)
{
  gdb::def_vector<T> buf;
  size_t buf_pos = 0;
  const int chunk = 4096;

  /* This function does not have a length parameter; it reads the
     entire OBJECT.  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      buf.resize (buf_pos + chunk);

      status = target_read_partial (ops, object, annex,
                                    (gdb_byte *) &buf[buf_pos],
                                    buf_pos, chunk,
                                    &xfered_len);

      if (status == TARGET_XFER_EOF)
        {
          /* Read all there was.  */
          buf.resize (buf_pos);
          return buf;
        }
      else if (status != TARGET_XFER_OK)
        {
          /* An error occurred.  */
          return {};
        }

      buf_pos += xfered_len;
    }
}
std::optional<gdb::byte_vector>
target_read_alloc (struct target_ops *ops, enum target_object object,
                   const char *annex)
{
  return target_read_alloc_1<gdb_byte> (ops, object, annex);
}

std::optional<gdb::char_vector>
target_read_stralloc (struct target_ops *ops, enum target_object object,
                      const char *annex)
{
  std::optional<gdb::char_vector> buf
    = target_read_alloc_1<char> (ops, object, annex);

  if (!buf)
    return {};

  if (buf->empty () || buf->back () != '\0')
    buf->push_back ('\0');

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (auto it = std::find (buf->begin (), buf->end (), '\0');
       it != buf->end (); it++)
    if (*it != '\0')
      {
        warning (_("target object %d, annex %s, "
                   "contained unexpected null characters"),
                 (int) object, annex ? annex : "(none)");
        break;
      }

  return buf;
}
/* Memory transfer methods.  */

static void
get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
		   LONGEST len)
{
  /* This method is used to read from an alternate, non-current
     target.  This read must bypass the overlay support (as symbols
     don't match this target), and GDB's internal cache (wrong cache
     for this target).  */
  if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
      != len)
    memory_error (TARGET_XFER_E_IO, addr);
}
ULONGEST
get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
			    int len, enum bfd_endian byte_order)
{
  gdb_byte buf[sizeof (ULONGEST)];

  gdb_assert (len <= sizeof (buf));
  get_target_memory (ops, addr, buf, len);
  return extract_unsigned_integer (buf, len, byte_order);
}
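/* Illustrative sketch only (not part of GDB): reading a 4-byte big-endian
   word from an alternate target stack, bypassing caches and overlays.  The
   variables OPS and ADDR are assumed to be in scope.  */
#if 0
  ULONGEST value = get_target_memory_unsigned (ops, addr, 4, BFD_ENDIAN_BIG);
#endif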
int
target_insert_breakpoint (struct gdbarch *gdbarch,
			  struct bp_target_info *bp_tgt)
{
  if (!may_insert_breakpoints)
    {
      warning (_("May not insert breakpoints"));
      return 1;
    }

  target_ops *target = current_inferior ()->top_target ();

  return target->insert_breakpoint (gdbarch, bp_tgt);
}
int
target_remove_breakpoint (struct gdbarch *gdbarch,
			  struct bp_target_info *bp_tgt,
			  enum remove_bp_reason reason)
{
  /* This is kind of a weird case to handle, but the permission might
     have been changed after breakpoints were inserted - in which case
     we should just take the user literally and assume that any
     breakpoints should be left in place.  */
  if (!may_insert_breakpoints)
    {
      warning (_("May not remove breakpoints"));
      return 1;
    }

  target_ops *target = current_inferior ()->top_target ();

  return target->remove_breakpoint (gdbarch, bp_tgt, reason);
}
static void
info_target_command (const char *args, int from_tty)
{
  int has_all_mem = 0;

  if (current_program_space->symfile_object_file != NULL)
    {
      objfile *objf = current_program_space->symfile_object_file;
      gdb_printf (_("Symbols from \"%ps\".\n"),
		  styled_string (file_name_style.style (),
				 objfile_name (objf)));
    }

  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    {
      if (!t->has_memory ())
	continue;

      if ((int) (t->stratum ()) <= (int) dummy_stratum)
	continue;
      if (has_all_mem)
	gdb_printf (_("\tWhile running this, "
		      "GDB does not access memory from...\n"));
      gdb_printf ("%s:\n", t->longname ());
      t->files_info ();
      has_all_mem = t->has_all_memory ();
    }
}
/* This function is called before any new inferior is created, e.g.
   by running a program, attaching, or connecting to a target.
   It cleans up any state from previous invocations which might
   change between runs.  This is a subset of what target_preopen
   resets (things which might change between targets).  */

void
target_pre_inferior ()
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3,
     where stale shared-library state eventually led to:

	Cannot access memory at address 0xdeadbeef  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  */
  if (!gdbarch_has_global_solist (current_inferior ()->arch ()))
    {
      no_shared_libraries (current_program_space);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* attach_flag may be set if the previous process associated with
     the inferior was attached to.  */
  current_inferior ()->attach_flag = false;

  current_inferior ()->highest_thread_num = 0;

  update_previous_thread ();

  agent_capability_invalidate ();
}
/* This is to be called by the open routine before it does
   anything.  */

void
target_preopen (int from_tty)
{
  dont_repeat ();

  if (current_inferior ()->pid != 0)
    {
      if (!from_tty
	  || !target_has_execution ()
	  || query (_("A program is being debugged already.  Kill it? ")))
	{
	  /* Core inferiors actually should be detached, not
	     killed.  */
	  if (target_has_execution ())
	    target_kill ();
	  else
	    target_detach (current_inferior (), 0);
	}
      else
	error (_("Program not killed."));
    }

  /* Release reference to old previous thread.  */
  update_previous_thread ();

  /* Calling target_kill may remove the target from the stack.  But if
     it doesn't (which seems like a win for UDI), remove it now.  */
  /* Leave the exec target, though.  The user may be switching from a
     live process to a core of the same program.  */
  current_inferior ()->pop_all_targets_above (file_stratum);

  target_pre_inferior ();
}
void
target_detach (inferior *inf, int from_tty)
{
  /* Threads don't need to be resumed until the end of this function.  */
  scoped_disable_commit_resumed disable_commit_resumed ("detaching");

  /* After we have detached, we will clear the register cache for this inferior
     by calling registers_changed_ptid.  We must save the pid_ptid before
     detaching, as the target detach method will clear inf->pid.  */
  ptid_t save_pid_ptid = ptid_t (inf->pid);

  /* As long as some to_detach implementations rely on the current_inferior
     (either directly, or indirectly, like through reading memory), INF needs
     to be the current inferior.  When that requirement will become no longer
     true, then we can remove this assertion.  */
  gdb_assert (inf == current_inferior ());

  prepare_for_detach ();

  gdb::observers::inferior_pre_detach.notify (inf);

  /* Hold a strong reference because detaching may unpush the
     target.  */
  auto proc_target_ref = target_ops_ref::new_reference (inf->process_target ());

  current_inferior ()->top_target ()->detach (inf, from_tty);

  process_stratum_target *proc_target
    = as_process_stratum_target (proc_target_ref.get ());

  registers_changed_ptid (proc_target, save_pid_ptid);

  /* We have to ensure we have no frame cache left.  Normally,
     registers_changed_ptid (save_pid_ptid) calls reinit_frame_cache when
     inferior_ptid matches save_pid_ptid, but in our case, it does not
     call it, as inferior_ptid has been reset.  */
  reinit_frame_cache ();

  disable_commit_resumed.reset_and_commit ();
}
void
target_disconnect (const char *args, int from_tty)
{
  /* If we're in breakpoints-always-inserted mode or if breakpoints
     are global across processes, we have to remove them before
     disconnecting.  */
  remove_breakpoints ();

  current_inferior ()->top_target ()->disconnect (args, from_tty);
}
/* See target/target.h.  */

ptid_t
target_wait (ptid_t ptid, struct target_waitstatus *status,
	     target_wait_flags options)
{
  target_ops *target = current_inferior ()->top_target ();
  process_stratum_target *proc_target = current_inferior ()->process_target ();

  gdb_assert (!proc_target->commit_resumed_state);

  if (!target_can_async_p (target))
    gdb_assert ((options & TARGET_WNOHANG) == 0);

  ptid_t event_ptid = null_ptid;
  SCOPE_EXIT { gdb::observers::target_post_wait.notify (event_ptid); };
  gdb::observers::target_pre_wait.notify (ptid);
  event_ptid = target->wait (ptid, status, options);

  return event_ptid;
}
ptid_t
default_target_wait (struct target_ops *ops,
		     ptid_t ptid, struct target_waitstatus *status,
		     target_wait_flags options)
{
  status->set_ignore ();
  return minus_one_ptid;
}
std::string
target_pid_to_str (ptid_t ptid)
{
  return current_inferior ()->top_target ()->pid_to_str (ptid);
}

const char *
target_thread_name (struct thread_info *info)
{
  gdb_assert (info->inf == current_inferior ());

  return current_inferior ()->top_target ()->thread_name (info);
}

struct thread_info *
target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
				     int handle_len,
				     struct inferior *inf)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->thread_handle_to_thread_info (thread_handle, handle_len, inf);
}

gdb::array_view<const gdb_byte>
target_thread_info_to_thread_handle (struct thread_info *tip)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->thread_info_to_thread_handle (tip);
}
void
target_resume (ptid_t scope_ptid, int step, enum gdb_signal signal)
{
  process_stratum_target *curr_target = current_inferior ()->process_target ();
  gdb_assert (!curr_target->commit_resumed_state);

  gdb_assert (inferior_ptid != null_ptid);
  gdb_assert (inferior_ptid.matches (scope_ptid));

  target_dcache_invalidate (current_program_space->aspace);

  current_inferior ()->top_target ()->resume (scope_ptid, step, signal);

  registers_changed_ptid (curr_target, scope_ptid);
  /* We only set the internal executing state here.  The user/frontend
     running state is set at a higher level.  This also clears the
     thread's stop_pc as side effect.  */
  set_executing (curr_target, scope_ptid, true);
  clear_inline_frame_state (curr_target, scope_ptid);

  if (target_can_async_p ())
    target_async (true);
}
void
target_commit_resumed ()
{
  gdb_assert (current_inferior ()->process_target ()->commit_resumed_state);
  current_inferior ()->top_target ()->commit_resumed ();
}

bool
target_has_pending_events ()
{
  return current_inferior ()->top_target ()->has_pending_events ();
}

void
target_pass_signals (gdb::array_view<const unsigned char> pass_signals)
{
  current_inferior ()->top_target ()->pass_signals (pass_signals);
}

void
target_program_signals (gdb::array_view<const unsigned char> program_signals)
{
  current_inferior ()->top_target ()->program_signals (program_signals);
}
static void
default_follow_fork (struct target_ops *self, inferior *child_inf,
		     ptid_t child_ptid, target_waitkind fork_kind,
		     bool follow_child, bool detach_fork)
{
  /* Some target returned a fork event, but did not know how to follow it.  */
  internal_error (_("could not find a target to follow fork"));
}

static void
default_follow_clone (struct target_ops *self, ptid_t child_ptid)
{
  /* Some target returned a clone event, but did not know how to follow it.  */
  internal_error (_("could not find a target to follow clone"));
}

void
target_follow_fork (inferior *child_inf, ptid_t child_ptid,
		    target_waitkind fork_kind, bool follow_child,
		    bool detach_fork)
{
  target_ops *target = current_inferior ()->top_target ();

  /* Check consistency between CHILD_INF, CHILD_PTID, FOLLOW_CHILD and
     DETACH_FORK.  */
  if (child_inf != nullptr)
    {
      gdb_assert (follow_child || !detach_fork);
      gdb_assert (child_inf->pid == child_ptid.pid ());
    }
  else
    gdb_assert (!follow_child && detach_fork);

  return target->follow_fork (child_inf, child_ptid, fork_kind, follow_child,
			      detach_fork);
}

void
target_follow_exec (inferior *follow_inf, ptid_t ptid,
		    const char *execd_pathname)
{
  current_inferior ()->top_target ()->follow_exec (follow_inf, ptid,
						   execd_pathname);
}

static void
default_mourn_inferior (struct target_ops *self)
{
  internal_error (_("could not find a target to follow mourn inferior"));
}

void
target_mourn_inferior (ptid_t ptid)
{
  gdb_assert (ptid.pid () == inferior_ptid.pid ());
  current_inferior ()->top_target ()->mourn_inferior ();
}
/* Look for a target which can describe architectural features, starting
   from TARGET.  If we find one, return its description.  */

const struct target_desc *
target_read_description (struct target_ops *target)
{
  return target->read_description ();
}
/* Default implementation of memory-searching.  */

static int
default_search_memory (struct target_ops *self,
		       CORE_ADDR start_addr, ULONGEST search_space_len,
		       const gdb_byte *pattern, ULONGEST pattern_len,
		       CORE_ADDR *found_addrp)
{
  auto read_memory = [=] (CORE_ADDR addr, gdb_byte *result, size_t len)
    {
      return target_read (current_inferior ()->top_target (),
			  TARGET_OBJECT_MEMORY, NULL,
			  result, addr, len) == len;
    };

  /* Start over from the top of the target stack.  */
  return simple_search_memory (read_memory, start_addr, search_space_len,
			       pattern, pattern_len, found_addrp);
}
/* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
   sequence of bytes in PATTERN with length PATTERN_LEN.

   The result is 1 if found, 0 if not found, and -1 if there was an error
   requiring halting of the search (e.g. memory read error).
   If the pattern is found the address is recorded in FOUND_ADDRP.  */

int
target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->search_memory (start_addr, search_space_len, pattern,
				pattern_len, found_addrp);
}
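/* Illustrative sketch only (not part of GDB): searching the first 4 KiB at
   START for the bytes "GDB".  START and GDBARCH are assumed to be in
   scope; the pattern is just an example.  */
#if 0
  static const gdb_byte pattern[] = { 'G', 'D', 'B' };
  CORE_ADDR found;

  if (target_search_memory (start, 4096, pattern, sizeof (pattern),
			    &found) == 1)
    gdb_printf ("pattern found at %s\n", paddress (gdbarch, found));
#endif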
/* Look through the currently pushed targets.  If none of them will
   be able to restart the currently running process, issue an error
   message.  */

void
target_require_runnable (void)
{
  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    {
      /* If this target knows how to create a new program, then
	 assume we will still be able to after killing the current
	 one.  Either killing and mourning will not pop T, or else
	 find_default_run_target will find it again.  */
      if (t->can_create_inferior ())
	return;

      /* Do not worry about targets at certain strata that can not
	 create inferiors.  Assume they will be pushed again if
	 necessary, and continue to the process_stratum.  */
      if (t->stratum () > process_stratum)
	continue;

      error (_("The \"%s\" target does not support \"run\".  "
	       "Try \"help target\" or \"continue\"."),
	     t->shortname ());
    }

  /* This function is only called if the target is running.  In that
     case there should have been a process_stratum target and it
     should either know how to create inferiors, or not...  */
  internal_error (_("No targets found"));
}
/* Whether GDB is allowed to fall back to the default run target for
   "run", "attach", etc. when no target is connected yet.  */
static bool auto_connect_native_target = true;

static void
show_auto_connect_native_target (struct ui_file *file, int from_tty,
				 struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Whether GDB may automatically connect to the "
		"native target is %s.\n"),
	      value);
}
/* A pointer to the target that can respond to "run" or "attach".
   Native targets are always singletons and instantiated early at GDB
   startup.  */
static target_ops *the_native_target;

/* See target.h.  */

void
set_native_target (target_ops *target)
{
  if (the_native_target != NULL)
    internal_error (_("native target already set (\"%s\")."),
		    the_native_target->longname ());

  the_native_target = target;
}

/* See target.h.  */

target_ops *
get_native_target ()
{
  return the_native_target;
}
/* Look through the list of possible targets for a target that can
   execute a run or attach command without any other data.  This is
   used to locate the default process stratum.

   If DO_MESG is not NULL, the result is always valid (error() is
   called for errors); else, return NULL on error.  */

static struct target_ops *
find_default_run_target (const char *do_mesg)
{
  if (auto_connect_native_target && the_native_target != NULL)
    return the_native_target;

  if (do_mesg != NULL)
    error (_("Don't know how to %s.  Try \"help target\"."), do_mesg);
  return NULL;
}
/* See target.h.  */

struct target_ops *
find_attach_target (void)
{
  /* If a target on the current stack can attach, use it.  */
  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    if (t->can_attach ())
      return t;

  /* Otherwise, use the default run target for attaching.  */
  return find_default_run_target ("attach");
}

/* See target.h.  */

struct target_ops *
find_run_target (void)
{
  /* If a target on the current stack can run, use it.  */
  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    if (t->can_create_inferior ())
      return t;

  /* Otherwise, use the default run target.  */
  return find_default_run_target ("run");
}
bool
target_ops::info_proc (const char *args, enum info_proc_what what)
{
  return false;
}
/* Implement the "info proc" command.  */

int
target_info_proc (const char *args, enum info_proc_what what)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  t = find_target_at (process_stratum);
  if (t == NULL)
    t = find_default_run_target (NULL);

  for (; t != NULL; t = t->beneath ())
    {
      if (t->info_proc (args, what))
	{
	  target_debug_printf_nofunc ("target_info_proc (\"%s\", %d)",
				      args, what);
	  return 1;
	}
    }

  return 0;
}
static int
find_default_supports_disable_randomization (struct target_ops *self)
{
  struct target_ops *t;

  t = find_default_run_target (NULL);
  if (t != NULL)
    return t->supports_disable_randomization ();
  return 0;
}

int
target_supports_disable_randomization (void)
{
  return current_inferior ()->top_target ()->supports_disable_randomization ();
}

/* See target/target.h.  */

int
target_supports_multi_process (void)
{
  return current_inferior ()->top_target ()->supports_multi_process ();
}

std::optional<gdb::char_vector>
target_get_osdata (const char *type)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  t = find_target_at (process_stratum);
  if (t == NULL)
    t = find_default_run_target ("get OS data");

  return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
}

target_ops *
target_ops::beneath () const
{
  return current_inferior ()->find_target_beneath (this);
}
void
target_ops::close ()
{
}

bool
target_ops::can_attach ()
{
  return false;
}

void
target_ops::attach (const char *, int)
{
  gdb_assert_not_reached ("target_ops::attach called");
}

bool
target_ops::can_create_inferior ()
{
  return false;
}

void
target_ops::create_inferior (const char *, const std::string &,
			     char **, int)
{
  gdb_assert_not_reached ("target_ops::create_inferior called");
}

bool
target_ops::can_run ()
{
  return false;
}

int
target_can_run ()
{
  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    {
      if (t->can_run ())
	return 1;
    }

  return 0;
}
/* Target file operations.  */

static struct target_ops *
default_fileio_target (void)
{
  struct target_ops *t;

  /* If we're already connected to something that can perform
     file I/O, use it.  Otherwise, try using the native target.  */
  t = find_target_at (process_stratum);
  if (t != NULL)
    return t;
  return find_default_run_target ("file I/O");
}
/* File handle for target file operations.  */

struct fileio_fh_t
{
  /* The target on which this file is open.  NULL if the target is
     meanwhile closed while the handle is open.  */
  target_ops *target;

  /* The file descriptor on the target.  */
  int target_fd;

  /* Check whether this fileio_fh_t represents a closed file.  */
  bool is_closed ()
  {
    return target_fd < 0;
  }
};

/* Vector of currently open file handles.  The value returned by
   target_fileio_open and passed as the FD argument to other
   target_fileio_* functions is an index into this vector.  This
   vector's entries are never freed; instead, files are marked as
   closed, and the handle becomes available for reuse.  */
static std::vector<fileio_fh_t> fileio_fhandles;

/* Index into fileio_fhandles of the lowest handle that might be
   closed.  This permits handle reuse without searching the whole
   list each time a new file is opened.  */
static int lowest_closed_fd;
/* See target.h.  */

void
fileio_handles_invalidate_target (target_ops *targ)
{
  for (fileio_fh_t &fh : fileio_fhandles)
    if (fh.target == targ)
      fh.target = NULL;
}
/* Acquire a target fileio file descriptor.  */

static int
acquire_fileio_fd (target_ops *target, int target_fd)
{
  /* Search for closed handles to reuse.  */
  for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
    {
      fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];

      if (fh.is_closed ())
	break;
    }

  /* Push a new handle if no closed handles were found.  */
  if (lowest_closed_fd == fileio_fhandles.size ())
    fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
  else
    fileio_fhandles[lowest_closed_fd] = {target, target_fd};

  /* Should no longer be marked closed.  */
  gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());

  /* Return its index, and start the next lookup at
     the next index.  */
  return lowest_closed_fd++;
}
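/* For example (illustrative only): opening three files yields descriptors
   0, 1 and 2; after target_fileio_close (1), the next target_fileio_open
   reuses index 1, and LOWEST_CLOSED_FD then advances past it.  */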
/* Release a target fileio file descriptor.  */

static void
release_fileio_fd (int fd, fileio_fh_t *fh)
{
  fh->target_fd = -1;
  lowest_closed_fd = std::min (lowest_closed_fd, fd);
}

/* Return a pointer to the fileio_fhandle_t corresponding to FD.  */

static fileio_fh_t *
fileio_fd_to_fh (int fd)
{
  return &fileio_fhandles[fd];
}
/* Default implementations of file i/o methods.  We don't want these
   to delegate automatically, because we need to know which target
   supported the method, in order to call it directly from within
   pread/pwrite, etc.  */

int
target_ops::fileio_open (struct inferior *inf, const char *filename,
			 int flags, int mode, int warn_if_slow,
			 fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
int
target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
			   ULONGEST offset, fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
			  ULONGEST offset, fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_fstat (int fd, struct stat *sb, fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_stat (struct inferior *inf, const char *filename,
			 struct stat *sb, fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_close (int fd, fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_unlink (struct inferior *inf, const char *filename,
			   fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

std::optional<std::string>
target_ops::fileio_readlink (struct inferior *inf, const char *filename,
			     fileio_error *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return {};
}
/* See target.h.  */

int
target_fileio_open (struct inferior *inf, const char *filename,
		    int flags, int mode, bool warn_if_slow,
		    fileio_error *target_errno)
{
  for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
    {
      int fd = t->fileio_open (inf, filename, flags, mode,
			       warn_if_slow, target_errno);

      if (fd == -1 && *target_errno == FILEIO_ENOSYS)
	continue;

      if (fd < 0)
	fd = -1;
      else
	fd = acquire_fileio_fd (t, fd);

      target_debug_printf_nofunc ("target_fileio_open (%d,%s,0x%x,0%o,%d) = %d (%d)",
				  inf == NULL ? 0 : inf->num, filename, flags, mode,
				  warn_if_slow, fd, fd != -1 ? 0 : *target_errno);
      return fd;
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
/* See target.h.  */

int
target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
		      ULONGEST offset, fileio_error *target_errno)
{
  fileio_fh_t *fh = fileio_fd_to_fh (fd);
  int ret = -1;

  if (fh->is_closed ())
    *target_errno = FILEIO_EBADF;
  else if (fh->target == NULL)
    *target_errno = FILEIO_EIO;
  else
    ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
				     len, offset, target_errno);

  target_debug_printf_nofunc ("target_fileio_pwrite (%d,...,%d,%s) = %d (%d)", fd,
			      len, pulongest (offset), ret,
			      ret != -1 ? 0 : *target_errno);
  return ret;
}

/* See target.h.  */

int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
		     ULONGEST offset, fileio_error *target_errno)
{
  fileio_fh_t *fh = fileio_fd_to_fh (fd);
  int ret = -1;

  if (fh->is_closed ())
    *target_errno = FILEIO_EBADF;
  else if (fh->target == NULL)
    *target_errno = FILEIO_EIO;
  else
    ret = fh->target->fileio_pread (fh->target_fd, read_buf,
				    len, offset, target_errno);

  target_debug_printf_nofunc ("target_fileio_pread (%d,...,%d,%s) = %d (%d)", fd, len,
			      pulongest (offset), ret,
			      ret != -1 ? 0 : *target_errno);
  return ret;
}

/* See target.h.  */

int
target_fileio_fstat (int fd, struct stat *sb, fileio_error *target_errno)
{
  fileio_fh_t *fh = fileio_fd_to_fh (fd);
  int ret = -1;

  if (fh->is_closed ())
    *target_errno = FILEIO_EBADF;
  else if (fh->target == NULL)
    *target_errno = FILEIO_EIO;
  else
    ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);

  target_debug_printf_nofunc ("target_fileio_fstat (%d) = %d (%d)", fd, ret,
			      ret != -1 ? 0 : *target_errno);
  return ret;
}

/* See target.h.  */

int
target_fileio_stat (struct inferior *inf, const char *filename,
		    struct stat *sb, fileio_error *target_errno)
{
  for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
    {
      int ret = t->fileio_stat (inf, filename, sb, target_errno);

      if (ret == -1 && *target_errno == FILEIO_ENOSYS)
	continue;

      target_debug_printf_nofunc ("target_fileio_stat (%s) = %d (%d)",
				  filename, ret,
				  ret != -1 ? 0 : *target_errno);
      return ret;
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* See target.h.  */

int
target_fileio_close (int fd, fileio_error *target_errno)
{
  fileio_fh_t *fh = fileio_fd_to_fh (fd);
  int ret = -1;

  if (fh->is_closed ())
    *target_errno = FILEIO_EBADF;
  else
    {
      if (fh->target != NULL)
	ret = fh->target->fileio_close (fh->target_fd,
					target_errno);
      else
	ret = 0;
      release_fileio_fd (fd, fh);
    }

  target_debug_printf_nofunc ("target_fileio_close (%d) = %d (%d)", fd, ret,
			      ret != -1 ? 0 : *target_errno);
  return ret;
}

/* See target.h.  */

int
target_fileio_unlink (struct inferior *inf, const char *filename,
		      fileio_error *target_errno)
{
  for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
    {
      int ret = t->fileio_unlink (inf, filename, target_errno);

      if (ret == -1 && *target_errno == FILEIO_ENOSYS)
	continue;

      target_debug_printf_nofunc ("target_fileio_unlink (%d,%s) = %d (%d)",
				  inf == NULL ? 0 : inf->num, filename, ret,
				  ret != -1 ? 0 : *target_errno);
      return ret;
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* See target.h.  */

std::optional<std::string>
target_fileio_readlink (struct inferior *inf, const char *filename,
			fileio_error *target_errno)
{
  for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
    {
      std::optional<std::string> ret
	= t->fileio_readlink (inf, filename, target_errno);

      if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
	continue;

      target_debug_printf_nofunc ("target_fileio_readlink (%d,%s) = %s (%d)",
				  inf == NULL ? 0 : inf->num, filename,
				  ret ? ret->c_str () : "(nil)",
				  ret ? 0 : *target_errno);
      return ret;
    }

  *target_errno = FILEIO_ENOSYS;
  return {};
}
/* Like scoped_fd, but specific to target fileio.  */

class scoped_target_fd
{
public:
  explicit scoped_target_fd (int fd) noexcept
    : m_fd (fd)
  {
  }

  ~scoped_target_fd ()
  {
    if (m_fd >= 0)
      {
	fileio_error target_errno;

	target_fileio_close (m_fd, &target_errno);
      }
  }

  DISABLE_COPY_AND_ASSIGN (scoped_target_fd);

  int get () const noexcept
  {
    return m_fd;
  }

private:
  int m_fd;
};
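/* Illustrative sketch only (not part of GDB): scoped_target_fd closes the
   descriptor automatically when it goes out of scope, including on error
   paths that throw.  The file name here is hypothetical.  */
#if 0
  fileio_error err;
  scoped_target_fd fd (target_fileio_open (nullptr, "/tmp/example",
					   FILEIO_O_RDONLY, 0, false, &err));
  if (fd.get () != -1)
    {
      /* ... use target_fileio_pread (fd.get (), ...) here ...  */
    }
  /* fd's destructor calls target_fileio_close when it leaves scope.  */
#endif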
/* Read target file FILENAME, in the filesystem as seen by INF.  If
   INF is NULL, use the filesystem seen by the debugger (GDB or, for
   remote targets, the remote stub).  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes
   are available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for
   more information.  */

static LONGEST
target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
			    gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  fileio_error target_errno;

  scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
					   0700, false, &target_errno));
  if (fd.get () == -1)
    return -1;

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = (gdb_byte *) xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      n = target_fileio_pread (fd.get (), &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = (gdb_byte *) xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
/* See target.h.  */

LONGEST
target_fileio_read_alloc (struct inferior *inf, const char *filename,
			  gdb_byte **buf_p)
{
  return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
}

/* See target.h.  */

gdb::unique_xmalloc_ptr<char>
target_fileio_read_stralloc (struct inferior *inf, const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return gdb::unique_xmalloc_ptr<char> (nullptr);

  if (transferred == 0)
    return make_unique_xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  return gdb::unique_xmalloc_ptr<char> (bufstr);
}
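/* Illustrative sketch only (not part of GDB): reading a text file from the
   target's filesystem as a NUL-terminated string.  The path is just an
   example.  */
#if 0
  gdb::unique_xmalloc_ptr<char> contents
    = target_fileio_read_stralloc (current_inferior (), "/proc/version");
  if (contents != nullptr)
    gdb_printf ("%s", contents.get ());
#endif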
static int
default_region_ok_for_hw_watchpoint (struct target_ops *self,
				     CORE_ADDR addr, int len)
{
  gdbarch *arch = current_inferior ()->arch ();
  return (len <= gdbarch_ptr_bit (arch) / TARGET_CHAR_BIT);
}

static int
default_watchpoint_addr_within_range (struct target_ops *target,
				      CORE_ADDR addr,
				      CORE_ADDR start, int length)
{
  return addr >= start && addr < start + length;
}

/* See target.h.  */

target_ops *
target_stack::find_beneath (const target_ops *t) const
{
  /* Look for a non-empty slot at stratum levels beneath T's.  */
  for (int stratum = t->stratum () - 1; stratum >= 0; --stratum)
    if (m_stack[stratum].get () != NULL)
      return m_stack[stratum].get ();

  return NULL;
}

/* See target.h.  */

struct target_ops *
find_target_at (enum strata stratum)
{
  return current_inferior ()->target_at (stratum);
}
/* See target.h.  */

void
target_announce_detach (int from_tty)
{
  int pid;
  const char *exec_file;

  if (!from_tty)
    return;

  pid = inferior_ptid.pid ();
  exec_file = current_program_space->exec_filename ();
  if (exec_file == nullptr)
    gdb_printf ("Detaching from pid %s\n",
		target_pid_to_str (ptid_t (pid)).c_str ());
  else
    gdb_printf (_("Detaching from program: %ps, %s\n"),
		styled_string (file_name_style.style (), exec_file),
		target_pid_to_str (ptid_t (pid)).c_str ());
}

/* See target.h.  */

void
target_announce_attach (int from_tty, int pid)
{
  if (!from_tty)
    return;

  const char *exec_file = current_program_space->exec_filename ();

  if (exec_file != nullptr)
    gdb_printf ("Attaching to program: %ps, %s\n",
		styled_string (file_name_style.style (), exec_file),
		target_pid_to_str (ptid_t (pid)).c_str ());
  else
    gdb_printf ("Attaching to %s\n",
		target_pid_to_str (ptid_t (pid)).c_str ());
}
/* The inferior process has died.  Long live the inferior!  */

void
generic_mourn_inferior (void)
{
  inferior *inf = current_inferior ();

  switch_to_no_thread ();

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out (inf->pspace);

  if (inf->pid != 0)
    exit_inferior (inf);

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf, inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
/* Convert a normal process ID to a string.  */

std::string
normal_pid_to_str (ptid_t ptid)
{
  return string_printf ("process %d", ptid.pid ());
}

static std::string
default_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}

/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
}

/* Error-catcher for target_make_corefile_notes.  */
static gdb::unique_xmalloc_ptr<char>
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
}
#include "target-delegates-gen.c"

/* The initial current target, so that there is always a semi-valid
   current target.  */

static dummy_target the_dummy_target;

/* See target.h.  */

target_ops *
get_dummy_target ()
{
  return &the_dummy_target;
}

static const target_info dummy_target_info = {
  "None",
  N_("None"),
  ""
};

strata
dummy_target::stratum () const
{
  return dummy_stratum;
}

strata
debug_target::stratum () const
{
  return debug_stratum;
}

const target_info &
dummy_target::info () const
{
  return dummy_target_info;
}

const target_info &
debug_target::info () const
{
  return beneath ()->info ();
}
bool
target_thread_alive (ptid_t ptid)
{
  return current_inferior ()->top_target ()->thread_alive (ptid);
}

void
target_update_thread_list (void)
{
  current_inferior ()->top_target ()->update_thread_list ();
}

void
target_stop (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();

  gdb_assert (!proc_target->commit_resumed_state);

  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  current_inferior ()->top_target ()->stop (ptid);
}

void
target_interrupt ()
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  current_inferior ()->top_target ()->interrupt ();
}
void
target_pass_ctrlc (void)
{
  /* Pass the Ctrl-C to the first target that has a thread
     running.  */
  for (inferior *inf : all_inferiors ())
    {
      target_ops *proc_target = inf->process_target ();
      if (proc_target == NULL)
	continue;

      for (thread_info *thr : inf->non_exited_threads ())
	{
	  /* A thread can be THREAD_STOPPED and executing, while
	     running an infcall.  */
	  if (thr->state == THREAD_RUNNING || thr->executing ())
	    {
	      /* We can get here quite deep in target layers.  Avoid
		 switching thread context or anything that would
		 communicate with the target (e.g., to fetch
		 registers), or flushing e.g., the frame cache.  We
		 just switch inferior in order to be able to call
		 through the target_stack.  */
	      scoped_restore_current_inferior restore_inferior;
	      set_current_inferior (inf);
	      current_inferior ()->top_target ()->pass_ctrlc ();
	      return;
	    }
	}
    }
}
void
default_target_pass_ctrlc (struct target_ops *ops)
{
  target_interrupt ();
}

/* See target/target.h.  */

void
target_stop_and_wait (ptid_t ptid)
{
  struct target_waitstatus status;
  bool was_non_stop = non_stop;

  non_stop = true;
  target_stop (ptid);

  target_wait (ptid, &status, 0);

  non_stop = was_non_stop;
}

/* See target/target.h.  */

void
target_continue_no_signal (ptid_t ptid)
{
  target_resume (ptid, 0, GDB_SIGNAL_0);
}

/* See target/target.h.  */

void
target_continue (ptid_t ptid, enum gdb_signal signal)
{
  target_resume (ptid, 0, signal);
}
/* Concatenate ELEM to LIST, a comma-separated list.  */

static void
str_comma_list_concat_elem (std::string *list, const char *elem)
{
  if (!list->empty ())
    list->append (", ");

  list->append (elem);
}

/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   OPT is removed from TARGET_OPTIONS.  */

static void
do_option (target_wait_flags *target_options, std::string *ret,
	   target_wait_flag opt, const char *opt_str)
{
  if ((*target_options & opt) != 0)
    {
      str_comma_list_concat_elem (ret, opt_str);
      *target_options &= ~opt;
    }
}
std::string
target_options_to_string (target_wait_flags target_options)
{
  std::string ret;

#define DO_TARG_OPTION(OPT) \
  do_option (&target_options, &ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  if (target_options != 0)
    str_comma_list_concat_elem (&ret, "unknown???");

  return ret;
}
*regcache
, int regno
)
3903 current_inferior ()->top_target ()->fetch_registers (regcache
, regno
);
3904 target_debug_printf ("%s", regcache
->register_debug_string (regno
).c_str ());
3908 target_store_registers (struct regcache
*regcache
, int regno
)
3910 if (!may_write_registers
)
3911 error (_("Writing to registers is not allowed (regno %d)"), regno
);
3913 current_inferior ()->top_target ()->store_registers (regcache
, regno
);
3914 target_debug_printf ("%s", regcache
->register_debug_string (regno
).c_str ());
3918 target_core_of_thread (ptid_t ptid
)
3920 return current_inferior ()->top_target ()->core_of_thread (ptid
);
int
simple_verify_memory (struct target_ops *ops,
		      const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
{
  LONGEST total_xfered = 0;

  while (total_xfered < size)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;
      gdb_byte buf[1024];
      ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);

      status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				    buf, NULL, lma + total_xfered, howmuch,
				    &xfered_len);
      if (status == TARGET_XFER_OK
	  && memcmp (data + total_xfered, buf, xfered_len) == 0)
	{
	  total_xfered += xfered_len;
	  QUIT;
	}
      else
	return 0;
    }

  return 1;
}
/* Default implementation of memory verification.  */

static int
default_verify_memory (struct target_ops *self,
		       const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  /* Start over from the top of the target stack.  */
  return simple_verify_memory (current_inferior ()->top_target (),
			       data, memaddr, size);
}

/* See target.h.  */

int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->verify_memory (data, memaddr, size);
}
/* The documentation for this function is in its prototype declaration in
   target.h.  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
			       enum target_hw_bp_type rw)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->insert_mask_watchpoint (addr, mask, rw);
}

/* The documentation for this function is in its prototype declaration in
   target.h.  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
			       enum target_hw_bp_type rw)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->remove_mask_watchpoint (addr, mask, rw);
}

/* The documentation for this function is in its prototype declaration
   in target.h.  */

int
target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->masked_watch_num_registers (addr, mask);
}

/* The documentation for this function is in its prototype declaration
   in target.h.  */

int
target_ranged_break_num_registers (void)
{
  return current_inferior ()->top_target ()->ranged_break_num_registers ();
}
/* See target.h.  */

struct btrace_target_info *
target_enable_btrace (thread_info *tp, const struct btrace_config *conf)
{
  return current_inferior ()->top_target ()->enable_btrace (tp, conf);
}

/* See target.h.  */

void
target_disable_btrace (struct btrace_target_info *btinfo)
{
  current_inferior ()->top_target ()->disable_btrace (btinfo);
}

/* See target.h.  */

void
target_teardown_btrace (struct btrace_target_info *btinfo)
{
  current_inferior ()->top_target ()->teardown_btrace (btinfo);
}

/* See target.h.  */

enum btrace_error
target_read_btrace (struct btrace_data *btrace,
		    struct btrace_target_info *btinfo,
		    enum btrace_read_type type)
{
  target_ops *target = current_inferior ()->top_target ();

  return target->read_btrace (btrace, btinfo, type);
}

/* See target.h.  */

const struct btrace_config *
target_btrace_conf (const struct btrace_target_info *btinfo)
{
  return current_inferior ()->top_target ()->btrace_conf (btinfo);
}
void
target_stop_recording (void)
{
  current_inferior ()->top_target ()->stop_recording ();
}

void
target_save_record (const char *filename)
{
  current_inferior ()->top_target ()->save_record (filename);
}

bool
target_supports_delete_record ()
{
  return current_inferior ()->top_target ()->supports_delete_record ();
}

void
target_delete_record (void)
{
  current_inferior ()->top_target ()->delete_record ();
}

enum record_method
target_record_method (ptid_t ptid)
{
  return current_inferior ()->top_target ()->record_method (ptid);
}

int
target_record_is_replaying (ptid_t ptid)
{
  return current_inferior ()->top_target ()->record_is_replaying (ptid);
}

int
target_record_will_replay (ptid_t ptid, int dir)
{
  return current_inferior ()->top_target ()->record_will_replay (ptid, dir);
}

void
target_record_stop_replaying (void)
{
  current_inferior ()->top_target ()->record_stop_replaying ();
}

void
target_goto_record_begin (void)
{
  current_inferior ()->top_target ()->goto_record_begin ();
}

void
target_goto_record_end (void)
{
  current_inferior ()->top_target ()->goto_record_end ();
}

void
target_goto_record (ULONGEST insn)
{
  current_inferior ()->top_target ()->goto_record (insn);
}

void
target_insn_history (int size, gdb_disassembly_flags flags)
{
  current_inferior ()->top_target ()->insn_history (size, flags);
}

void
target_insn_history_from (ULONGEST from, int size,
			  gdb_disassembly_flags flags)
{
  current_inferior ()->top_target ()->insn_history_from (from, size, flags);
}

void
target_insn_history_range (ULONGEST begin, ULONGEST end,
			   gdb_disassembly_flags flags)
{
  current_inferior ()->top_target ()->insn_history_range (begin, end, flags);
}

void
target_call_history (int size, record_print_flags flags)
{
  current_inferior ()->top_target ()->call_history (size, flags);
}

void
target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
{
  current_inferior ()->top_target ()->call_history_from (begin, size, flags);
}

void
target_call_history_range (ULONGEST begin, ULONGEST end,
			   record_print_flags flags)
{
  current_inferior ()->top_target ()->call_history_range (begin, end, flags);
}
const struct frame_unwind *
target_get_unwinder (void)
{
  return current_inferior ()->top_target ()->get_unwinder ();
}

const struct frame_unwind *
target_get_tailcall_unwinder (void)
{
  return current_inferior ()->top_target ()->get_tailcall_unwinder ();
}

void
target_prepare_to_generate_core (void)
{
  current_inferior ()->top_target ()->prepare_to_generate_core ();
}

void
target_done_generating_core (void)
{
  current_inferior ()->top_target ()->done_generating_core ();
}

static char targ_desc[] =
  "Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";

static void
default_rcmd (struct target_ops *self, const char *command,
	      struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}

static void
do_monitor_command (const char *cmd, int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
/* Erases all the memory regions marked as flash.  CMD and FROM_TTY are
   ignored.  */

static void
flash_erase_command (const char *cmd, int from_tty)
{
  /* Used to communicate termination of flash operations to the target.  */
  bool found_flash_region = false;
  gdbarch *gdbarch = current_inferior ()->arch ();

  std::vector<mem_region> mem_regions = target_memory_map ();

  /* Iterate over all memory regions.  */
  for (const mem_region &m : mem_regions)
    {
      /* Is this a flash memory region?  */
      if (m.attrib.mode == MEM_FLASH)
	{
	  found_flash_region = true;
	  target_flash_erase (m.lo, m.hi - m.lo);

	  ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");

	  current_uiout->message (_("Erasing flash memory region at address "));
	  current_uiout->field_core_addr ("address", gdbarch, m.lo);
	  current_uiout->message (", size = ");
	  current_uiout->field_string ("size", hex_string (m.hi - m.lo));
	  current_uiout->message ("\n");
	}
    }

  /* Did we do any flash operations?  If so, we need to finalize them.  */
  if (found_flash_region)
    target_flash_done ();
  else
    current_uiout->message (_("No flash memory regions found.\n"));
}
/* Print the name of each layer of our target stack.  */

static void
maintenance_print_target_stack (const char *cmd, int from_tty)
{
  gdb_printf (_("The current target stack is:\n"));

  for (target_ops *t = current_inferior ()->top_target ();
       t != NULL;
       t = t->beneath ())
    {
      if (t->stratum () == debug_stratum)
	continue;
      gdb_printf ("  - %s (%s)\n", t->shortname (), t->longname ());
    }
}
void
target_async (bool enable)
{
  /* If we are trying to enable async mode then it must be the case that
     async mode is possible for this target.  */
  gdb_assert (!enable || target_can_async_p ());
  infrun_async (enable);
  current_inferior ()->top_target ()->async (enable);
}

void
target_thread_events (bool enable)
{
  current_inferior ()->top_target ()->thread_events (enable);
}

bool
target_supports_set_thread_options (gdb_thread_options options)
{
  inferior *inf = current_inferior ();
  return inf->top_target ()->supports_set_thread_options (options);
}
/* Controls if targets can report that they can/are async.  This is
   just for maintainers to use when debugging gdb.  */
bool target_async_permitted = true;

static void
set_maint_target_async (bool permitted)
{
  if (have_live_inferiors ())
    error (_("Cannot change this setting while the inferior is running."));

  target_async_permitted = permitted;
}

static bool
get_maint_target_async ()
{
  return target_async_permitted;
}

static void
show_maint_target_async (ui_file *file, int from_tty,
			 cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Controlling the inferior in "
		"asynchronous mode is %s.\n"), value);
}
/* Return true if the target operates in non-stop mode even with "set
   non-stop off".  */

static int
target_always_non_stop_p (void)
{
  return current_inferior ()->top_target ()->always_non_stop_p ();
}

/* See target.h.  */

bool
target_is_non_stop_p ()
{
  return ((non_stop
	   || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
	   || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
	       && target_always_non_stop_p ()))
	  && target_can_async_p ());
}
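/* In other words (illustrative summary): the target is considered non-stop
   when async execution is possible and either the user enabled non-stop,
   the maintenance override forces it on, or the override is "auto" and the
   target itself always operates in non-stop mode.  */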
/* See target.h.  */

bool
exists_non_stop_target ()
{
  if (target_is_non_stop_p ())
    return true;

  scoped_restore_current_thread restore_thread;

  for (inferior *inf : all_inferiors ())
    {
      switch_to_inferior_no_thread (inf);
      if (target_is_non_stop_p ())
	return true;
    }

  return false;
}
/* Controls if targets can report that they always run in non-stop
   mode.  This is just for maintainers to use when debugging gdb.  */
enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;

/* Set callback for maint target-non-stop setting.  */

static void
set_maint_target_non_stop (auto_boolean enabled)
{
  if (have_live_inferiors ())
    error (_("Cannot change this setting while the inferior is running."));

  target_non_stop_enabled = enabled;
}

/* Get callback for maint target-non-stop setting.  */

static auto_boolean
get_maint_target_non_stop ()
{
  return target_non_stop_enabled;
}

static void
show_maint_target_non_stop (ui_file *file, int from_tty,
			    cmd_list_element *c, const char *value)
{
  if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
    gdb_printf (file,
		_("Whether the target is always in non-stop mode "
		  "is %s (currently %s).\n"), value,
		target_always_non_stop_p () ? "on" : "off");
  else
    gdb_printf (file,
		_("Whether the target is always in non-stop mode "
		  "is %s.\n"), value);
}
/* Temporary copies of permission settings.  */

static bool may_write_registers_1 = true;
static bool may_write_memory_1 = true;
static bool may_insert_breakpoints_1 = true;
static bool may_insert_tracepoints_1 = true;
static bool may_insert_fast_tracepoints_1 = true;
static bool may_stop_1 = true;

/* Make the user-set values match the real values again.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
/* The one function handles (most of) the permission flags in the same
   way.  */

static void
set_target_permissions (const char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
/* Set some permissions independently of observer mode.  */

static void
set_write_memory_registers_permission (const char *args, int from_tty,
				       struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  may_write_registers = may_write_registers_1;
  update_observer_mode ();
}
void _initialize_target ();

void
_initialize_target ()
{
  the_debug_target = new debug_target ();

  add_info ("target", info_target_command, targ_desc);
  add_info ("files", info_target_command, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose."),
			     set_targetdebug,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_maint_target_async,
			   get_maint_target_async,
			   show_maint_target_async,
			   &maintenance_set_cmdlist,
			   &maintenance_show_cmdlist);

  add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
				_("\
Set whether gdb always controls the inferior in non-stop mode."), _("\
Show whether gdb always controls the inferior in non-stop mode."), _("\
Tells gdb whether to control the inferior in non-stop mode."),
				set_maint_target_non_stop,
				get_maint_target_non_stop,
				show_maint_target_non_stop,
				&maintenance_set_cmdlist,
				&maintenance_show_cmdlist);

  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_registers_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_registers_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_com ("flash-erase", no_class, flash_erase_command,
	   _("Erase all flash memory regions."));

  add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
			   &auto_connect_native_target, _("\
Set whether GDB may automatically connect to the native target."), _("\
Show whether GDB may automatically connect to the native target."), _("\
When on, and GDB is not connected to a target yet, GDB\n\
attempts \"run\" and other commands with the native target."),
			   NULL, show_auto_connect_native_target,
			   &setlist, &showlist);
}