1 /* Select target systems and architectures at runtime for GDB.
3 Copyright (C) 1990-2023 Free Software Foundation, Inc.
5 Contributed by Cygnus Support.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "target-dcache.h"
29 #include "observable.h"
37 #include "target-descriptions.h"
38 #include "gdbthread.h"
41 #include "inline-frame.h"
42 #include "tracepoint.h"
43 #include "gdbsupport/fileio.h"
44 #include "gdbsupport/agent.h"
46 #include "target-debug.h"
48 #include "event-top.h"
50 #include "gdbsupport/byte-vector.h"
51 #include "gdbsupport/search.h"
53 #include <unordered_map>
54 #include "target-connection.h"
56 #include "cli/cli-decode.h"
58 static void generic_tls_error (void) ATTRIBUTE_NORETURN
;
60 static void default_terminal_info (struct target_ops
*, const char *, int);
62 static int default_watchpoint_addr_within_range (struct target_ops
*,
63 CORE_ADDR
, CORE_ADDR
, int);
65 static int default_region_ok_for_hw_watchpoint (struct target_ops
*,
68 static void default_rcmd (struct target_ops
*, const char *, struct ui_file
*);
70 static ptid_t
default_get_ada_task_ptid (struct target_ops
*self
,
71 long lwp
, ULONGEST tid
);
73 static void default_mourn_inferior (struct target_ops
*self
);
75 static int default_search_memory (struct target_ops
*ops
,
77 ULONGEST search_space_len
,
78 const gdb_byte
*pattern
,
80 CORE_ADDR
*found_addrp
);
82 static int default_verify_memory (struct target_ops
*self
,
84 CORE_ADDR memaddr
, ULONGEST size
);
86 static void tcomplain (void) ATTRIBUTE_NORETURN
;
88 static struct target_ops
*find_default_run_target (const char *);
90 static int dummy_find_memory_regions (struct target_ops
*self
,
91 find_memory_region_ftype ignore1
,
94 static gdb::unique_xmalloc_ptr
<char> dummy_make_corefile_notes
95 (struct target_ops
*self
, bfd
*ignore1
, int *ignore2
);
97 static std::string
default_pid_to_str (struct target_ops
*ops
, ptid_t ptid
);
99 static enum exec_direction_kind default_execution_direction
100 (struct target_ops
*self
);
102 /* Mapping between target_info objects (which have address identity)
103 and corresponding open/factory function/callback. Each add_target
104 call adds one entry to this map, and registers a "target
105 TARGET_NAME" command that when invoked calls the factory registered
106 here. The target_info object is associated with the command via
107 the command's context. */
108 static std::unordered_map
<const target_info
*, target_open_ftype
*>
111 /* The singleton debug target. */
113 static struct target_ops
*the_debug_target
;
115 /* Command list for target. */
117 static struct cmd_list_element
*targetlist
= NULL
;
119 /* True if we should trust readonly sections from the
120 executable when reading memory. */
122 static bool trust_readonly
= false;
124 /* Nonzero if we should show true memory content including
125 memory breakpoint inserted by gdb. */
127 static int show_memory_breakpoints
= 0;
129 /* These globals control whether GDB attempts to perform these
130 operations; they are useful for targets that need to prevent
131 inadvertent disruption, such as in non-stop mode. */
133 bool may_write_registers
= true;
135 bool may_write_memory
= true;
137 bool may_insert_breakpoints
= true;
139 bool may_insert_tracepoints
= true;
141 bool may_insert_fast_tracepoints
= true;
143 bool may_stop
= true;
145 /* Non-zero if we want to see trace of target level stuff. */
147 static unsigned int targetdebug
= 0;
150 set_targetdebug (const char *args
, int from_tty
, struct cmd_list_element
*c
)
153 current_inferior ()->push_target (the_debug_target
);
155 current_inferior ()->unpush_target (the_debug_target
);
159 show_targetdebug (struct ui_file
*file
, int from_tty
,
160 struct cmd_list_element
*c
, const char *value
)
162 gdb_printf (file
, _("Target debugging is %s.\n"), value
);
168 for (target_ops
*t
= current_inferior ()->top_target ();
171 if (t
->has_memory ())
180 for (target_ops
*t
= current_inferior ()->top_target ();
190 target_has_registers ()
192 for (target_ops
*t
= current_inferior ()->top_target ();
195 if (t
->has_registers ())
202 target_has_execution (inferior
*inf
)
205 inf
= current_inferior ();
207 for (target_ops
*t
= inf
->top_target ();
209 t
= inf
->find_target_beneath (t
))
210 if (t
->has_execution (inf
))
219 return current_inferior ()->top_target ()->shortname ();
225 target_attach_no_wait ()
227 return current_inferior ()->top_target ()->attach_no_wait ();
233 target_post_attach (int pid
)
235 return current_inferior ()->top_target ()->post_attach (pid
);
241 target_prepare_to_store (regcache
*regcache
)
243 return current_inferior ()->top_target ()->prepare_to_store (regcache
);
249 target_supports_enable_disable_tracepoint ()
251 target_ops
*target
= current_inferior ()->top_target ();
253 return target
->supports_enable_disable_tracepoint ();
257 target_supports_string_tracing ()
259 return current_inferior ()->top_target ()->supports_string_tracing ();
265 target_supports_evaluation_of_breakpoint_conditions ()
267 target_ops
*target
= current_inferior ()->top_target ();
269 return target
->supports_evaluation_of_breakpoint_conditions ();
275 target_supports_dumpcore ()
277 return current_inferior ()->top_target ()->supports_dumpcore ();
283 target_dumpcore (const char *filename
)
285 return current_inferior ()->top_target ()->dumpcore (filename
);
291 target_can_run_breakpoint_commands ()
293 return current_inferior ()->top_target ()->can_run_breakpoint_commands ();
301 return current_inferior ()->top_target ()->files_info ();
307 target_insert_fork_catchpoint (int pid
)
309 return current_inferior ()->top_target ()->insert_fork_catchpoint (pid
);
315 target_remove_fork_catchpoint (int pid
)
317 return current_inferior ()->top_target ()->remove_fork_catchpoint (pid
);
323 target_insert_vfork_catchpoint (int pid
)
325 return current_inferior ()->top_target ()->insert_vfork_catchpoint (pid
);
331 target_remove_vfork_catchpoint (int pid
)
333 return current_inferior ()->top_target ()->remove_vfork_catchpoint (pid
);
339 target_insert_exec_catchpoint (int pid
)
341 return current_inferior ()->top_target ()->insert_exec_catchpoint (pid
);
347 target_remove_exec_catchpoint (int pid
)
349 return current_inferior ()->top_target ()->remove_exec_catchpoint (pid
);
355 target_set_syscall_catchpoint (int pid
, bool needed
, int any_count
,
356 gdb::array_view
<const int> syscall_counts
)
358 target_ops
*target
= current_inferior ()->top_target ();
360 return target
->set_syscall_catchpoint (pid
, needed
, any_count
,
367 target_rcmd (const char *command
, struct ui_file
*outbuf
)
369 return current_inferior ()->top_target ()->rcmd (command
, outbuf
);
375 target_can_lock_scheduler ()
377 target_ops
*target
= current_inferior ()->top_target ();
379 return (target
->get_thread_control_capabilities ()& tc_schedlock
) != 0;
385 target_can_async_p ()
387 return target_can_async_p (current_inferior ()->top_target ());
393 target_can_async_p (struct target_ops
*target
)
395 if (!target_async_permitted
)
397 return target
->can_async_p ();
405 bool result
= current_inferior ()->top_target ()->is_async_p ();
406 gdb_assert (target_async_permitted
|| !result
);
411 target_execution_direction ()
413 return current_inferior ()->top_target ()->execution_direction ();
419 target_extra_thread_info (thread_info
*tp
)
421 return current_inferior ()->top_target ()->extra_thread_info (tp
);
427 target_pid_to_exec_file (int pid
)
429 return current_inferior ()->top_target ()->pid_to_exec_file (pid
);
435 target_thread_architecture (ptid_t ptid
)
437 return current_inferior ()->top_target ()->thread_architecture (ptid
);
443 target_find_memory_regions (find_memory_region_ftype func
, void *data
)
445 return current_inferior ()->top_target ()->find_memory_regions (func
, data
);
450 gdb::unique_xmalloc_ptr
<char>
451 target_make_corefile_notes (bfd
*bfd
, int *size_p
)
453 return current_inferior ()->top_target ()->make_corefile_notes (bfd
, size_p
);
457 target_get_bookmark (const char *args
, int from_tty
)
459 return current_inferior ()->top_target ()->get_bookmark (args
, from_tty
);
463 target_goto_bookmark (const gdb_byte
*arg
, int from_tty
)
465 return current_inferior ()->top_target ()->goto_bookmark (arg
, from_tty
);
471 target_stopped_by_watchpoint ()
473 return current_inferior ()->top_target ()->stopped_by_watchpoint ();
479 target_stopped_by_sw_breakpoint ()
481 return current_inferior ()->top_target ()->stopped_by_sw_breakpoint ();
485 target_supports_stopped_by_sw_breakpoint ()
487 target_ops
*target
= current_inferior ()->top_target ();
489 return target
->supports_stopped_by_sw_breakpoint ();
493 target_stopped_by_hw_breakpoint ()
495 return current_inferior ()->top_target ()->stopped_by_hw_breakpoint ();
499 target_supports_stopped_by_hw_breakpoint ()
501 target_ops
*target
= current_inferior ()->top_target ();
503 return target
->supports_stopped_by_hw_breakpoint ();
509 target_have_steppable_watchpoint ()
511 return current_inferior ()->top_target ()->have_steppable_watchpoint ();
517 target_can_use_hardware_watchpoint (bptype type
, int cnt
, int othertype
)
519 target_ops
*target
= current_inferior ()->top_target ();
521 return target
->can_use_hw_breakpoint (type
, cnt
, othertype
);
527 target_region_ok_for_hw_watchpoint (CORE_ADDR addr
, int len
)
529 target_ops
*target
= current_inferior ()->top_target ();
531 return target
->region_ok_for_hw_watchpoint (addr
, len
);
536 target_can_do_single_step ()
538 return current_inferior ()->top_target ()->can_do_single_step ();
544 target_insert_watchpoint (CORE_ADDR addr
, int len
, target_hw_bp_type type
,
547 target_ops
*target
= current_inferior ()->top_target ();
549 return target
->insert_watchpoint (addr
, len
, type
, cond
);
555 target_remove_watchpoint (CORE_ADDR addr
, int len
, target_hw_bp_type type
,
558 target_ops
*target
= current_inferior ()->top_target ();
560 return target
->remove_watchpoint (addr
, len
, type
, cond
);
566 target_insert_hw_breakpoint (gdbarch
*gdbarch
, bp_target_info
*bp_tgt
)
568 target_ops
*target
= current_inferior ()->top_target ();
570 return target
->insert_hw_breakpoint (gdbarch
, bp_tgt
);
576 target_remove_hw_breakpoint (gdbarch
*gdbarch
, bp_target_info
*bp_tgt
)
578 target_ops
*target
= current_inferior ()->top_target ();
580 return target
->remove_hw_breakpoint (gdbarch
, bp_tgt
);
586 target_can_accel_watchpoint_condition (CORE_ADDR addr
, int len
, int type
,
589 target_ops
*target
= current_inferior ()->top_target ();
591 return target
->can_accel_watchpoint_condition (addr
, len
, type
, cond
);
597 target_can_execute_reverse ()
599 return current_inferior ()->top_target ()->can_execute_reverse ();
603 target_get_ada_task_ptid (long lwp
, ULONGEST tid
)
605 return current_inferior ()->top_target ()->get_ada_task_ptid (lwp
, tid
);
609 target_filesystem_is_local ()
611 return current_inferior ()->top_target ()->filesystem_is_local ();
617 return current_inferior ()->top_target ()->trace_init ();
621 target_download_tracepoint (bp_location
*location
)
623 return current_inferior ()->top_target ()->download_tracepoint (location
);
627 target_can_download_tracepoint ()
629 return current_inferior ()->top_target ()->can_download_tracepoint ();
633 target_download_trace_state_variable (const trace_state_variable
&tsv
)
635 target_ops
*target
= current_inferior ()->top_target ();
637 return target
->download_trace_state_variable (tsv
);
641 target_enable_tracepoint (bp_location
*loc
)
643 return current_inferior ()->top_target ()->enable_tracepoint (loc
);
647 target_disable_tracepoint (bp_location
*loc
)
649 return current_inferior ()->top_target ()->disable_tracepoint (loc
);
653 target_trace_start ()
655 return current_inferior ()->top_target ()->trace_start ();
659 target_trace_set_readonly_regions ()
661 return current_inferior ()->top_target ()->trace_set_readonly_regions ();
665 target_get_trace_status (trace_status
*ts
)
667 return current_inferior ()->top_target ()->get_trace_status (ts
);
671 target_get_tracepoint_status (breakpoint
*tp
, uploaded_tp
*utp
)
673 return current_inferior ()->top_target ()->get_tracepoint_status (tp
, utp
);
679 return current_inferior ()->top_target ()->trace_stop ();
683 target_trace_find (trace_find_type type
, int num
,
684 CORE_ADDR addr1
, CORE_ADDR addr2
, int *tpp
)
686 target_ops
*target
= current_inferior ()->top_target ();
688 return target
->trace_find (type
, num
, addr1
, addr2
, tpp
);
692 target_get_trace_state_variable_value (int tsv
, LONGEST
*val
)
694 target_ops
*target
= current_inferior ()->top_target ();
696 return target
->get_trace_state_variable_value (tsv
, val
);
700 target_save_trace_data (const char *filename
)
702 return current_inferior ()->top_target ()->save_trace_data (filename
);
706 target_upload_tracepoints (uploaded_tp
**utpp
)
708 return current_inferior ()->top_target ()->upload_tracepoints (utpp
);
712 target_upload_trace_state_variables (uploaded_tsv
**utsvp
)
714 target_ops
*target
= current_inferior ()->top_target ();
716 return target
->upload_trace_state_variables (utsvp
);
720 target_get_raw_trace_data (gdb_byte
*buf
, ULONGEST offset
, LONGEST len
)
722 target_ops
*target
= current_inferior ()->top_target ();
724 return target
->get_raw_trace_data (buf
, offset
, len
);
728 target_get_min_fast_tracepoint_insn_len ()
730 target_ops
*target
= current_inferior ()->top_target ();
732 return target
->get_min_fast_tracepoint_insn_len ();
736 target_set_disconnected_tracing (int val
)
738 return current_inferior ()->top_target ()->set_disconnected_tracing (val
);
742 target_set_circular_trace_buffer (int val
)
744 return current_inferior ()->top_target ()->set_circular_trace_buffer (val
);
748 target_set_trace_buffer_size (LONGEST val
)
750 return current_inferior ()->top_target ()->set_trace_buffer_size (val
);
754 target_set_trace_notes (const char *user
, const char *notes
,
755 const char *stopnotes
)
757 target_ops
*target
= current_inferior ()->top_target ();
759 return target
->set_trace_notes (user
, notes
, stopnotes
);
763 target_get_tib_address (ptid_t ptid
, CORE_ADDR
*addr
)
765 return current_inferior ()->top_target ()->get_tib_address (ptid
, addr
);
769 target_set_permissions ()
771 return current_inferior ()->top_target ()->set_permissions ();
775 target_static_tracepoint_marker_at (CORE_ADDR addr
,
776 static_tracepoint_marker
*marker
)
778 target_ops
*target
= current_inferior ()->top_target ();
780 return target
->static_tracepoint_marker_at (addr
, marker
);
783 std::vector
<static_tracepoint_marker
>
784 target_static_tracepoint_markers_by_strid (const char *marker_id
)
786 target_ops
*target
= current_inferior ()->top_target ();
788 return target
->static_tracepoint_markers_by_strid (marker_id
);
792 target_traceframe_info ()
794 return current_inferior ()->top_target ()->traceframe_info ();
798 target_use_agent (bool use
)
800 return current_inferior ()->top_target ()->use_agent (use
);
804 target_can_use_agent ()
806 return current_inferior ()->top_target ()->can_use_agent ();
810 target_augmented_libraries_svr4_read ()
812 return current_inferior ()->top_target ()->augmented_libraries_svr4_read ();
816 target_supports_memory_tagging ()
818 return current_inferior ()->top_target ()->supports_memory_tagging ();
822 target_fetch_memtags (CORE_ADDR address
, size_t len
, gdb::byte_vector
&tags
,
825 return current_inferior ()->top_target ()->fetch_memtags (address
, len
, tags
, type
);
829 target_store_memtags (CORE_ADDR address
, size_t len
,
830 const gdb::byte_vector
&tags
, int type
)
832 return current_inferior ()->top_target ()->store_memtags (address
, len
, tags
, type
);
836 target_log_command (const char *p
)
838 return current_inferior ()->top_target ()->log_command (p
);
841 /* This is used to implement the various target commands. */
844 open_target (const char *args
, int from_tty
, struct cmd_list_element
*command
)
846 auto *ti
= static_cast<target_info
*> (command
->context ());
847 target_open_ftype
*func
= target_factories
[ti
];
850 gdb_printf (gdb_stdlog
, "-> %s->open (...)\n",
853 func (args
, from_tty
);
856 gdb_printf (gdb_stdlog
, "<- %s->open (%s, %d)\n",
857 ti
->shortname
, args
, from_tty
);
863 add_target (const target_info
&t
, target_open_ftype
*func
,
864 completer_ftype
*completer
)
866 struct cmd_list_element
*c
;
868 auto &func_slot
= target_factories
[&t
];
869 if (func_slot
!= nullptr)
870 internal_error (_("target already added (\"%s\")."), t
.shortname
);
873 if (targetlist
== NULL
)
874 add_basic_prefix_cmd ("target", class_run
, _("\
875 Connect to a target machine or process.\n\
876 The first argument is the type or protocol of the target machine.\n\
877 Remaining arguments are interpreted by the target protocol. For more\n\
878 information on the arguments for a particular protocol, type\n\
879 `help target ' followed by the protocol name."),
880 &targetlist
, 0, &cmdlist
);
881 c
= add_cmd (t
.shortname
, no_class
, t
.doc
, &targetlist
);
882 c
->set_context ((void *) &t
);
883 c
->func
= open_target
;
884 if (completer
!= NULL
)
885 set_cmd_completer (c
, completer
);
891 add_deprecated_target_alias (const target_info
&tinfo
, const char *alias
)
893 struct cmd_list_element
*c
;
895 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
897 c
= add_cmd (alias
, no_class
, tinfo
.doc
, &targetlist
);
898 c
->func
= open_target
;
899 c
->set_context ((void *) &tinfo
);
900 gdb::unique_xmalloc_ptr
<char> alt
901 = xstrprintf ("target %s", tinfo
.shortname
);
902 deprecate_cmd (c
, alt
.release ());
911 /* If the commit_resume_state of the to-be-killed-inferior's process stratum
912 is true, and this inferior is the last live inferior with resumed threads
913 of that target, then we want to leave commit_resume_state to false, as the
914 target won't have any resumed threads anymore. We achieve this with
915 this scoped_disable_commit_resumed. On construction, it will set the flag
916 to false. On destruction, it will only set it to true if there are resumed
918 scoped_disable_commit_resumed
disable ("killing");
919 current_inferior ()->top_target ()->kill ();
923 target_load (const char *arg
, int from_tty
)
925 target_dcache_invalidate ();
926 current_inferior ()->top_target ()->load (arg
, from_tty
);
931 target_terminal_state
target_terminal::m_terminal_state
932 = target_terminal_state::is_ours
;
934 /* See target/target.h. */
937 target_terminal::init (void)
939 current_inferior ()->top_target ()->terminal_init ();
941 m_terminal_state
= target_terminal_state::is_ours
;
944 /* See target/target.h. */
947 target_terminal::inferior (void)
949 struct ui
*ui
= current_ui
;
951 /* A background resume (``run&'') should leave GDB in control of the
953 if (ui
->prompt_state
!= PROMPT_BLOCKED
)
956 /* Since we always run the inferior in the main console (unless "set
957 inferior-tty" is in effect), when some UI other than the main one
958 calls target_terminal::inferior, then we leave the main UI's
959 terminal settings as is. */
963 /* If GDB is resuming the inferior in the foreground, install
964 inferior's terminal modes. */
966 struct inferior
*inf
= current_inferior ();
968 if (inf
->terminal_state
!= target_terminal_state::is_inferior
)
970 current_inferior ()->top_target ()->terminal_inferior ();
971 inf
->terminal_state
= target_terminal_state::is_inferior
;
974 m_terminal_state
= target_terminal_state::is_inferior
;
976 /* If the user hit C-c before, pretend that it was hit right
978 if (check_quit_flag ())
979 target_pass_ctrlc ();
982 /* See target/target.h. */
985 target_terminal::restore_inferior (void)
987 struct ui
*ui
= current_ui
;
989 /* See target_terminal::inferior(). */
990 if (ui
->prompt_state
!= PROMPT_BLOCKED
|| ui
!= main_ui
)
993 /* Restore the terminal settings of inferiors that were in the
994 foreground but are now ours_for_output due to a temporary
995 target_target::ours_for_output() call. */
998 scoped_restore_current_inferior restore_inferior
;
1000 for (::inferior
*inf
: all_inferiors ())
1002 if (inf
->terminal_state
== target_terminal_state::is_ours_for_output
)
1004 set_current_inferior (inf
);
1005 current_inferior ()->top_target ()->terminal_inferior ();
1006 inf
->terminal_state
= target_terminal_state::is_inferior
;
1011 m_terminal_state
= target_terminal_state::is_inferior
;
1013 /* If the user hit C-c before, pretend that it was hit right
1015 if (check_quit_flag ())
1016 target_pass_ctrlc ();
1019 /* Switch terminal state to DESIRED_STATE, either is_ours, or
1020 is_ours_for_output. */
1023 target_terminal_is_ours_kind (target_terminal_state desired_state
)
1025 scoped_restore_current_inferior restore_inferior
;
1027 /* Must do this in two passes. First, have all inferiors save the
1028 current terminal settings. Then, after all inferiors have add a
1029 chance to safely save the terminal settings, restore GDB's
1030 terminal settings. */
1032 for (inferior
*inf
: all_inferiors ())
1034 if (inf
->terminal_state
== target_terminal_state::is_inferior
)
1036 set_current_inferior (inf
);
1037 current_inferior ()->top_target ()->terminal_save_inferior ();
1041 for (inferior
*inf
: all_inferiors ())
1043 /* Note we don't check is_inferior here like above because we
1044 need to handle 'is_ours_for_output -> is_ours' too. Careful
1045 to never transition from 'is_ours' to 'is_ours_for_output',
1047 if (inf
->terminal_state
!= target_terminal_state::is_ours
1048 && inf
->terminal_state
!= desired_state
)
1050 set_current_inferior (inf
);
1051 if (desired_state
== target_terminal_state::is_ours
)
1052 current_inferior ()->top_target ()->terminal_ours ();
1053 else if (desired_state
== target_terminal_state::is_ours_for_output
)
1054 current_inferior ()->top_target ()->terminal_ours_for_output ();
1056 gdb_assert_not_reached ("unhandled desired state");
1057 inf
->terminal_state
= desired_state
;
1062 /* See target/target.h. */
1065 target_terminal::ours ()
1067 struct ui
*ui
= current_ui
;
1069 /* See target_terminal::inferior. */
1073 if (m_terminal_state
== target_terminal_state::is_ours
)
1076 target_terminal_is_ours_kind (target_terminal_state::is_ours
);
1077 m_terminal_state
= target_terminal_state::is_ours
;
1080 /* See target/target.h. */
1083 target_terminal::ours_for_output ()
1085 struct ui
*ui
= current_ui
;
1087 /* See target_terminal::inferior. */
1091 if (!target_terminal::is_inferior ())
1094 target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output
);
1095 target_terminal::m_terminal_state
= target_terminal_state::is_ours_for_output
;
1098 /* See target/target.h. */
1101 target_terminal::info (const char *arg
, int from_tty
)
1103 current_inferior ()->top_target ()->terminal_info (arg
, from_tty
);
1109 target_supports_terminal_ours (void)
1111 /* The current top target is the target at the top of the target
1112 stack of the current inferior. While normally there's always an
1113 inferior, we must check for nullptr here because we can get here
1114 very early during startup, before the initial inferior is first
1116 inferior
*inf
= current_inferior ();
1120 return inf
->top_target ()->supports_terminal_ours ();
1126 error (_("You can't do that when your target is `%s'"),
1127 current_inferior ()->top_target ()->shortname ());
1133 error (_("You can't do that without a process to debug."));
1137 default_terminal_info (struct target_ops
*self
, const char *args
, int from_tty
)
1139 gdb_printf (_("No saved terminal information.\n"));
1142 /* A default implementation for the to_get_ada_task_ptid target method.
1144 This function builds the PTID by using both LWP and TID as part of
1145 the PTID lwp and tid elements. The pid used is the pid of the
1149 default_get_ada_task_ptid (struct target_ops
*self
, long lwp
, ULONGEST tid
)
1151 return ptid_t (inferior_ptid
.pid (), lwp
, tid
);
1154 static enum exec_direction_kind
1155 default_execution_direction (struct target_ops
*self
)
1157 if (!target_can_execute_reverse ())
1158 return EXEC_FORWARD
;
1159 else if (!target_can_async_p ())
1160 return EXEC_FORWARD
;
1162 gdb_assert_not_reached ("\
1163 to_execution_direction must be implemented for reverse async");
1169 target_ops_ref_policy::decref (target_ops
*t
)
1172 if (t
->refcount () == 0)
1174 if (t
->stratum () == process_stratum
)
1175 connection_list_remove (as_process_stratum_target (t
));
1183 target_stack::push (target_ops
*t
)
1185 /* We must create a new reference first. It is possible that T is
1186 already pushed on this target stack, in which case we will first
1187 unpush it below, before re-pushing it. If we don't increment the
1188 reference count now, then when we unpush it, we might end up deleting
1189 T, which is not good. */
1190 auto ref
= target_ops_ref::new_reference (t
);
1192 strata stratum
= t
->stratum ();
1194 /* If there's already a target at this stratum, remove it. */
1196 if (m_stack
[stratum
].get () != nullptr)
1197 unpush (m_stack
[stratum
].get ());
1199 /* Now add the new one. */
1200 m_stack
[stratum
] = std::move (ref
);
1202 if (m_top
< stratum
)
1205 if (stratum
== process_stratum
)
1206 connection_list_add (as_process_stratum_target (t
));
1212 target_stack::unpush (target_ops
*t
)
1214 gdb_assert (t
!= NULL
);
1216 strata stratum
= t
->stratum ();
1218 if (stratum
== dummy_stratum
)
1219 internal_error (_("Attempt to unpush the dummy target"));
1221 /* Look for the specified target. Note that a target can only occur
1222 once in the target stack. */
1224 if (m_stack
[stratum
] != t
)
1226 /* If T wasn't pushed, quit. Only open targets should be
1231 if (m_top
== stratum
)
1232 m_top
= this->find_beneath (t
)->stratum ();
1234 /* Move the target reference off the target stack, this sets the pointer
1235 held in m_stack to nullptr, and places the reference in ref. When
1236 ref goes out of scope its reference count will be decremented, which
1237 might cause the target to close.
1239 We have to do it this way, and not just set the value in m_stack to
1240 nullptr directly, because doing so would decrement the reference
1241 count first, which might close the target, and closing the target
1242 does a check that the target is not on any inferiors target_stack. */
1243 auto ref
= std::move (m_stack
[stratum
]);
1249 target_unpusher::operator() (struct target_ops
*ops
) const
1251 current_inferior ()->unpush_target (ops
);
1254 /* Default implementation of to_get_thread_local_address. */
1257 generic_tls_error (void)
1259 throw_error (TLS_GENERIC_ERROR
,
1260 _("Cannot find thread-local variables on this target"));
1263 /* Using the objfile specified in OBJFILE, find the address for the
1264 current thread's thread-local storage with offset OFFSET. */
1266 target_translate_tls_address (struct objfile
*objfile
, CORE_ADDR offset
)
1268 volatile CORE_ADDR addr
= 0;
1269 struct target_ops
*target
= current_inferior ()->top_target ();
1270 struct gdbarch
*gdbarch
= target_gdbarch ();
1272 /* If OBJFILE is a separate debug object file, look for the
1273 original object file. */
1274 if (objfile
->separate_debug_objfile_backlink
!= NULL
)
1275 objfile
= objfile
->separate_debug_objfile_backlink
;
1277 if (gdbarch_fetch_tls_load_module_address_p (gdbarch
))
1279 ptid_t ptid
= inferior_ptid
;
1285 /* Fetch the load module address for this objfile. */
1286 lm_addr
= gdbarch_fetch_tls_load_module_address (gdbarch
,
1289 if (gdbarch_get_thread_local_address_p (gdbarch
))
1290 addr
= gdbarch_get_thread_local_address (gdbarch
, ptid
, lm_addr
,
1293 addr
= target
->get_thread_local_address (ptid
, lm_addr
, offset
);
1295 /* If an error occurred, print TLS related messages here. Otherwise,
1296 throw the error to some higher catcher. */
1297 catch (const gdb_exception
&ex
)
1299 int objfile_is_library
= (objfile
->flags
& OBJF_SHARED
);
1303 case TLS_NO_LIBRARY_SUPPORT_ERROR
:
1304 error (_("Cannot find thread-local variables "
1305 "in this thread library."));
1307 case TLS_LOAD_MODULE_NOT_FOUND_ERROR
:
1308 if (objfile_is_library
)
1309 error (_("Cannot find shared library `%s' in dynamic"
1310 " linker's load module list"), objfile_name (objfile
));
1312 error (_("Cannot find executable file `%s' in dynamic"
1313 " linker's load module list"), objfile_name (objfile
));
1315 case TLS_NOT_ALLOCATED_YET_ERROR
:
1316 if (objfile_is_library
)
1317 error (_("The inferior has not yet allocated storage for"
1318 " thread-local variables in\n"
1319 "the shared library `%s'\n"
1321 objfile_name (objfile
),
1322 target_pid_to_str (ptid
).c_str ());
1324 error (_("The inferior has not yet allocated storage for"
1325 " thread-local variables in\n"
1326 "the executable `%s'\n"
1328 objfile_name (objfile
),
1329 target_pid_to_str (ptid
).c_str ());
1331 case TLS_GENERIC_ERROR
:
1332 if (objfile_is_library
)
1333 error (_("Cannot find thread-local storage for %s, "
1334 "shared library %s:\n%s"),
1335 target_pid_to_str (ptid
).c_str (),
1336 objfile_name (objfile
), ex
.what ());
1338 error (_("Cannot find thread-local storage for %s, "
1339 "executable file %s:\n%s"),
1340 target_pid_to_str (ptid
).c_str (),
1341 objfile_name (objfile
), ex
.what ());
1350 error (_("Cannot find thread-local variables on this target"));
1356 target_xfer_status_to_string (enum target_xfer_status status
)
1358 #define CASE(X) case X: return #X
1361 CASE(TARGET_XFER_E_IO
);
1362 CASE(TARGET_XFER_UNAVAILABLE
);
1370 const target_section_table
*
1371 target_get_section_table (struct target_ops
*target
)
1373 return target
->get_section_table ();
1376 /* Find a section containing ADDR. */
1378 const struct target_section
*
1379 target_section_by_addr (struct target_ops
*target
, CORE_ADDR addr
)
1381 const target_section_table
*table
= target_get_section_table (target
);
1386 for (const target_section
&secp
: *table
)
1388 if (addr
>= secp
.addr
&& addr
< secp
.endaddr
)
1396 const target_section_table
*
1397 default_get_section_table ()
1399 return ¤t_program_space
->target_sections ();
1402 /* Helper for the memory xfer routines. Checks the attributes of the
1403 memory region of MEMADDR against the read or write being attempted.
1404 If the access is permitted returns true, otherwise returns false.
1405 REGION_P is an optional output parameter. If not-NULL, it is
1406 filled with a pointer to the memory region of MEMADDR. REG_LEN
1407 returns LEN trimmed to the end of the region. This is how much the
1408 caller can continue requesting, if the access is permitted. A
1409 single xfer request must not straddle memory region boundaries. */
1412 memory_xfer_check_region (gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1413 ULONGEST memaddr
, ULONGEST len
, ULONGEST
*reg_len
,
1414 struct mem_region
**region_p
)
1416 struct mem_region
*region
;
1418 region
= lookup_mem_region (memaddr
);
1420 if (region_p
!= NULL
)
1423 switch (region
->attrib
.mode
)
1426 if (writebuf
!= NULL
)
1431 if (readbuf
!= NULL
)
1436 /* We only support writing to flash during "load" for now. */
1437 if (writebuf
!= NULL
)
1438 error (_("Writing to flash memory forbidden in this context"));
1445 /* region->hi == 0 means there's no upper bound. */
1446 if (memaddr
+ len
< region
->hi
|| region
->hi
== 0)
1449 *reg_len
= region
->hi
- memaddr
;
1454 /* Read memory from more than one valid target. A core file, for
1455 instance, could have some of memory but delegate other bits to
1456 the target below it. So, we must manually try all targets. */
1458 enum target_xfer_status
1459 raw_memory_xfer_partial (struct target_ops
*ops
, gdb_byte
*readbuf
,
1460 const gdb_byte
*writebuf
, ULONGEST memaddr
, LONGEST len
,
1461 ULONGEST
*xfered_len
)
1463 enum target_xfer_status res
;
1467 res
= ops
->xfer_partial (TARGET_OBJECT_MEMORY
, NULL
,
1468 readbuf
, writebuf
, memaddr
, len
,
1470 if (res
== TARGET_XFER_OK
)
1473 /* Stop if the target reports that the memory is not available. */
1474 if (res
== TARGET_XFER_UNAVAILABLE
)
1477 /* Don't continue past targets which have all the memory.
1478 At one time, this code was necessary to read data from
1479 executables / shared libraries when data for the requested
1480 addresses weren't available in the core file. But now the
1481 core target handles this case itself. */
1482 if (ops
->has_all_memory ())
1485 ops
= ops
->beneath ();
1487 while (ops
!= NULL
);
1489 /* The cache works at the raw memory level. Make sure the cache
1490 gets updated with raw contents no matter what kind of memory
1491 object was originally being written. Note we do write-through
1492 first, so that if it fails, we don't write to the cache contents
1493 that never made it to the target. */
1494 if (writebuf
!= NULL
1495 && inferior_ptid
!= null_ptid
1496 && target_dcache_init_p ()
1497 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
1499 DCACHE
*dcache
= target_dcache_get ();
1501 /* Note that writing to an area of memory which wasn't present
1502 in the cache doesn't cause it to be loaded in. */
1503 dcache_update (dcache
, res
, memaddr
, writebuf
, *xfered_len
);
1509 /* Perform a partial memory transfer.
1510 For docs see target.h, to_xfer_partial. */
1512 static enum target_xfer_status
1513 memory_xfer_partial_1 (struct target_ops
*ops
, enum target_object object
,
1514 gdb_byte
*readbuf
, const gdb_byte
*writebuf
, ULONGEST memaddr
,
1515 ULONGEST len
, ULONGEST
*xfered_len
)
1517 enum target_xfer_status res
;
1519 struct mem_region
*region
;
1520 struct inferior
*inf
;
1522 /* For accesses to unmapped overlay sections, read directly from
1523 files. Must do this first, as MEMADDR may need adjustment. */
1524 if (readbuf
!= NULL
&& overlay_debugging
)
1526 struct obj_section
*section
= find_pc_overlay (memaddr
);
1528 if (pc_in_unmapped_range (memaddr
, section
))
1530 const target_section_table
*table
= target_get_section_table (ops
);
1531 const char *section_name
= section
->the_bfd_section
->name
;
1533 memaddr
= overlay_mapped_address (memaddr
, section
);
1535 auto match_cb
= [=] (const struct target_section
*s
)
1537 return (strcmp (section_name
, s
->the_bfd_section
->name
) == 0);
1540 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1541 memaddr
, len
, xfered_len
,
1546 /* Try the executable files, if "trust-readonly-sections" is set. */
1547 if (readbuf
!= NULL
&& trust_readonly
)
1549 const struct target_section
*secp
1550 = target_section_by_addr (ops
, memaddr
);
1552 && (bfd_section_flags (secp
->the_bfd_section
) & SEC_READONLY
))
1554 const target_section_table
*table
= target_get_section_table (ops
);
1555 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1556 memaddr
, len
, xfered_len
,
1561 /* Try GDB's internal data cache. */
1563 if (!memory_xfer_check_region (readbuf
, writebuf
, memaddr
, len
, ®_len
,
1565 return TARGET_XFER_E_IO
;
1567 if (inferior_ptid
!= null_ptid
)
1568 inf
= current_inferior ();
1574 /* The dcache reads whole cache lines; that doesn't play well
1575 with reading from a trace buffer, because reading outside of
1576 the collected memory range fails. */
1577 && get_traceframe_number () == -1
1578 && (region
->attrib
.cache
1579 || (stack_cache_enabled_p () && object
== TARGET_OBJECT_STACK_MEMORY
)
1580 || (code_cache_enabled_p () && object
== TARGET_OBJECT_CODE_MEMORY
)))
1582 DCACHE
*dcache
= target_dcache_get_or_init ();
1584 return dcache_read_memory_partial (ops
, dcache
, memaddr
, readbuf
,
1585 reg_len
, xfered_len
);
1588 /* If none of those methods found the memory we wanted, fall back
1589 to a target partial transfer. Normally a single call to
1590 to_xfer_partial is enough; if it doesn't recognize an object
1591 it will call the to_xfer_partial of the next target down.
1592 But for memory this won't do. Memory is the only target
1593 object which can be read from more than one valid target.
1594 A core file, for instance, could have some of memory but
1595 delegate other bits to the target below it. So, we must
1596 manually try all targets. */
1598 res
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, memaddr
, reg_len
,
1601 /* If we still haven't got anything, return the last error. We
1606 /* Perform a partial memory transfer. For docs see target.h,
1609 static enum target_xfer_status
1610 memory_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1611 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1612 ULONGEST memaddr
, ULONGEST len
, ULONGEST
*xfered_len
)
1614 enum target_xfer_status res
;
1616 /* Zero length requests are ok and require no work. */
1618 return TARGET_XFER_EOF
;
1620 memaddr
= gdbarch_remove_non_address_bits (target_gdbarch (), memaddr
);
1622 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1623 breakpoint insns, thus hiding out from higher layers whether
1624 there are software breakpoints inserted in the code stream. */
1625 if (readbuf
!= NULL
)
1627 res
= memory_xfer_partial_1 (ops
, object
, readbuf
, NULL
, memaddr
, len
,
1630 if (res
== TARGET_XFER_OK
&& !show_memory_breakpoints
)
1631 breakpoint_xfer_memory (readbuf
, NULL
, NULL
, memaddr
, *xfered_len
);
1635 /* A large write request is likely to be partially satisfied
1636 by memory_xfer_partial_1. We will continually malloc
1637 and free a copy of the entire write request for breakpoint
1638 shadow handling even though we only end up writing a small
1639 subset of it. Cap writes to a limit specified by the target
1640 to mitigate this. */
1641 len
= std::min (ops
->get_memory_xfer_limit (), len
);
1643 gdb::byte_vector
buf (writebuf
, writebuf
+ len
);
1644 breakpoint_xfer_memory (NULL
, buf
.data (), writebuf
, memaddr
, len
);
1645 res
= memory_xfer_partial_1 (ops
, object
, NULL
, buf
.data (), memaddr
, len
,
1652 scoped_restore_tmpl
<int>
1653 make_scoped_restore_show_memory_breakpoints (int show
)
1655 return make_scoped_restore (&show_memory_breakpoints
, show
);
1658 /* For docs see target.h, to_xfer_partial. */
1660 enum target_xfer_status
1661 target_xfer_partial (struct target_ops
*ops
,
1662 enum target_object object
, const char *annex
,
1663 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1664 ULONGEST offset
, ULONGEST len
,
1665 ULONGEST
*xfered_len
)
1667 enum target_xfer_status retval
;
1669 /* Transfer is done when LEN is zero. */
1671 return TARGET_XFER_EOF
;
1673 if (writebuf
&& !may_write_memory
)
1674 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1675 core_addr_to_string_nz (offset
), plongest (len
));
1679 /* If this is a memory transfer, let the memory-specific code
1680 have a look at it instead. Memory transfers are more
1682 if (object
== TARGET_OBJECT_MEMORY
|| object
== TARGET_OBJECT_STACK_MEMORY
1683 || object
== TARGET_OBJECT_CODE_MEMORY
)
1684 retval
= memory_xfer_partial (ops
, object
, readbuf
,
1685 writebuf
, offset
, len
, xfered_len
);
1686 else if (object
== TARGET_OBJECT_RAW_MEMORY
)
1688 /* Skip/avoid accessing the target if the memory region
1689 attributes block the access. Check this here instead of in
1690 raw_memory_xfer_partial as otherwise we'd end up checking
1691 this twice in the case of the memory_xfer_partial path is
1692 taken; once before checking the dcache, and another in the
1693 tail call to raw_memory_xfer_partial. */
1694 if (!memory_xfer_check_region (readbuf
, writebuf
, offset
, len
, &len
,
1696 return TARGET_XFER_E_IO
;
1698 /* Request the normal memory object from other layers. */
1699 retval
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, offset
, len
,
1703 retval
= ops
->xfer_partial (object
, annex
, readbuf
,
1704 writebuf
, offset
, len
, xfered_len
);
1708 const unsigned char *myaddr
= NULL
;
1710 gdb_printf (gdb_stdlog
,
1711 "%s:target_xfer_partial "
1712 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1715 (annex
? annex
: "(null)"),
1716 host_address_to_string (readbuf
),
1717 host_address_to_string (writebuf
),
1718 core_addr_to_string_nz (offset
),
1719 pulongest (len
), retval
,
1720 pulongest (*xfered_len
));
1726 if (retval
== TARGET_XFER_OK
&& myaddr
!= NULL
)
1730 gdb_puts (", bytes =", gdb_stdlog
);
1731 for (i
= 0; i
< *xfered_len
; i
++)
1733 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
1735 if (targetdebug
< 2 && i
> 0)
1737 gdb_printf (gdb_stdlog
, " ...");
1740 gdb_printf (gdb_stdlog
, "\n");
1743 gdb_printf (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
1747 gdb_putc ('\n', gdb_stdlog
);
1750 /* Check implementations of to_xfer_partial update *XFERED_LEN
1751 properly. Do assertion after printing debug messages, so that we
1752 can find more clues on assertion failure from debugging messages. */
1753 if (retval
== TARGET_XFER_OK
|| retval
== TARGET_XFER_UNAVAILABLE
)
1754 gdb_assert (*xfered_len
> 0);
1759 /* Read LEN bytes of target memory at address MEMADDR, placing the
1760 results in GDB's memory at MYADDR. Returns either 0 for success or
1761 -1 if any error occurs.
1763 If an error occurs, no guarantee is made about the contents of the data at
1764 MYADDR. In particular, the caller should not depend upon partial reads
1765 filling the buffer with good data. There is no way for the caller to know
1766 how much good data might have been transfered anyway. Callers that can
1767 deal with partial reads should call target_read (which will retry until
1768 it makes no progress, and then return how much was transferred). */
1771 target_read_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1773 if (target_read (current_inferior ()->top_target (),
1774 TARGET_OBJECT_MEMORY
, NULL
,
1775 myaddr
, memaddr
, len
) == len
)
1781 /* See target/target.h. */
1784 target_read_uint32 (CORE_ADDR memaddr
, uint32_t *result
)
1789 r
= target_read_memory (memaddr
, buf
, sizeof buf
);
1792 *result
= extract_unsigned_integer (buf
, sizeof buf
,
1793 gdbarch_byte_order (target_gdbarch ()));
1797 /* Like target_read_memory, but specify explicitly that this is a read
1798 from the target's raw memory. That is, this read bypasses the
1799 dcache, breakpoint shadowing, etc. */
1802 target_read_raw_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1804 if (target_read (current_inferior ()->top_target (),
1805 TARGET_OBJECT_RAW_MEMORY
, NULL
,
1806 myaddr
, memaddr
, len
) == len
)
1812 /* Like target_read_memory, but specify explicitly that this is a read from
1813 the target's stack. This may trigger different cache behavior. */
1816 target_read_stack (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1818 if (target_read (current_inferior ()->top_target (),
1819 TARGET_OBJECT_STACK_MEMORY
, NULL
,
1820 myaddr
, memaddr
, len
) == len
)
1826 /* Like target_read_memory, but specify explicitly that this is a read from
1827 the target's code. This may trigger different cache behavior. */
1830 target_read_code (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1832 if (target_read (current_inferior ()->top_target (),
1833 TARGET_OBJECT_CODE_MEMORY
, NULL
,
1834 myaddr
, memaddr
, len
) == len
)
1840 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1841 Returns either 0 for success or -1 if any error occurs. If an
1842 error occurs, no guarantee is made about how much data got written.
1843 Callers that can deal with partial writes should call
1847 target_write_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1849 if (target_write (current_inferior ()->top_target (),
1850 TARGET_OBJECT_MEMORY
, NULL
,
1851 myaddr
, memaddr
, len
) == len
)
1857 /* Write LEN bytes from MYADDR to target raw memory at address
1858 MEMADDR. Returns either 0 for success or -1 if any error occurs.
1859 If an error occurs, no guarantee is made about how much data got
1860 written. Callers that can deal with partial writes should call
1864 target_write_raw_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1866 if (target_write (current_inferior ()->top_target (),
1867 TARGET_OBJECT_RAW_MEMORY
, NULL
,
1868 myaddr
, memaddr
, len
) == len
)
1874 /* Fetch the target's memory map. */
1876 std::vector
<mem_region
>
1877 target_memory_map (void)
1879 target_ops
*target
= current_inferior ()->top_target ();
1880 std::vector
<mem_region
> result
= target
->memory_map ();
1881 if (result
.empty ())
1884 std::sort (result
.begin (), result
.end ());
1886 /* Check that regions do not overlap. Simultaneously assign
1887 a numbering for the "mem" commands to use to refer to
1889 mem_region
*last_one
= NULL
;
1890 for (size_t ix
= 0; ix
< result
.size (); ix
++)
1892 mem_region
*this_one
= &result
[ix
];
1893 this_one
->number
= ix
;
1895 if (last_one
!= NULL
&& last_one
->hi
> this_one
->lo
)
1897 warning (_("Overlapping regions in memory map: ignoring"));
1898 return std::vector
<mem_region
> ();
1901 last_one
= this_one
;
1908 target_flash_erase (ULONGEST address
, LONGEST length
)
1910 current_inferior ()->top_target ()->flash_erase (address
, length
);
1914 target_flash_done (void)
1916 current_inferior ()->top_target ()->flash_done ();
1920 show_trust_readonly (struct ui_file
*file
, int from_tty
,
1921 struct cmd_list_element
*c
, const char *value
)
1924 _("Mode for reading from readonly sections is %s.\n"),
1928 /* Target vector read/write partial wrapper functions. */
1930 static enum target_xfer_status
1931 target_read_partial (struct target_ops
*ops
,
1932 enum target_object object
,
1933 const char *annex
, gdb_byte
*buf
,
1934 ULONGEST offset
, ULONGEST len
,
1935 ULONGEST
*xfered_len
)
1937 return target_xfer_partial (ops
, object
, annex
, buf
, NULL
, offset
, len
,
1941 static enum target_xfer_status
1942 target_write_partial (struct target_ops
*ops
,
1943 enum target_object object
,
1944 const char *annex
, const gdb_byte
*buf
,
1945 ULONGEST offset
, LONGEST len
, ULONGEST
*xfered_len
)
1947 return target_xfer_partial (ops
, object
, annex
, NULL
, buf
, offset
, len
,
1951 /* Wrappers to perform the full transfer. */
1953 /* For docs on target_read see target.h. */
1956 target_read (struct target_ops
*ops
,
1957 enum target_object object
,
1958 const char *annex
, gdb_byte
*buf
,
1959 ULONGEST offset
, LONGEST len
)
1961 LONGEST xfered_total
= 0;
1964 /* If we are reading from a memory object, find the length of an addressable
1965 unit for that architecture. */
1966 if (object
== TARGET_OBJECT_MEMORY
1967 || object
== TARGET_OBJECT_STACK_MEMORY
1968 || object
== TARGET_OBJECT_CODE_MEMORY
1969 || object
== TARGET_OBJECT_RAW_MEMORY
)
1970 unit_size
= gdbarch_addressable_memory_unit_size (target_gdbarch ());
1972 while (xfered_total
< len
)
1974 ULONGEST xfered_partial
;
1975 enum target_xfer_status status
;
1977 status
= target_read_partial (ops
, object
, annex
,
1978 buf
+ xfered_total
* unit_size
,
1979 offset
+ xfered_total
, len
- xfered_total
,
1982 /* Call an observer, notifying them of the xfer progress? */
1983 if (status
== TARGET_XFER_EOF
)
1984 return xfered_total
;
1985 else if (status
== TARGET_XFER_OK
)
1987 xfered_total
+= xfered_partial
;
1991 return TARGET_XFER_E_IO
;
1997 /* Assuming that the entire [begin, end) range of memory cannot be
1998 read, try to read whatever subrange is possible to read.
2000 The function returns, in RESULT, either zero or one memory block.
2001 If there's a readable subrange at the beginning, it is completely
2002 read and returned. Any further readable subrange will not be read.
2003 Otherwise, if there's a readable subrange at the end, it will be
2004 completely read and returned. Any readable subranges before it
2005 (obviously, not starting at the beginning), will be ignored. In
2006 other cases -- either no readable subrange, or readable subrange(s)
2007 that is neither at the beginning, or end, nothing is returned.
2009 The purpose of this function is to handle a read across a boundary
2010 of accessible memory in a case when memory map is not available.
2011 The above restrictions are fine for this case, but will give
2012 incorrect results if the memory is 'patchy'. However, supporting
2013 'patchy' memory would require trying to read every single byte,
2014 and it seems unacceptable solution. Explicit memory map is
2015 recommended for this case -- and target_read_memory_robust will
2016 take care of reading multiple ranges then. */
2019 read_whatever_is_readable (struct target_ops
*ops
,
2020 const ULONGEST begin
, const ULONGEST end
,
2022 std::vector
<memory_read_result
> *result
)
2024 ULONGEST current_begin
= begin
;
2025 ULONGEST current_end
= end
;
2027 ULONGEST xfered_len
;
2029 /* If we previously failed to read 1 byte, nothing can be done here. */
2030 if (end
- begin
<= 1)
2033 gdb::unique_xmalloc_ptr
<gdb_byte
> buf ((gdb_byte
*) xmalloc (end
- begin
));
2035 /* Check that either first or the last byte is readable, and give up
2036 if not. This heuristic is meant to permit reading accessible memory
2037 at the boundary of accessible region. */
2038 if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2039 buf
.get (), begin
, 1, &xfered_len
) == TARGET_XFER_OK
)
2044 else if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2045 buf
.get () + (end
- begin
) - 1, end
- 1, 1,
2046 &xfered_len
) == TARGET_XFER_OK
)
2054 /* Loop invariant is that the [current_begin, current_end) was previously
2055 found to be not readable as a whole.
2057 Note loop condition -- if the range has 1 byte, we can't divide the range
2058 so there's no point trying further. */
2059 while (current_end
- current_begin
> 1)
2061 ULONGEST first_half_begin
, first_half_end
;
2062 ULONGEST second_half_begin
, second_half_end
;
2064 ULONGEST middle
= current_begin
+ (current_end
- current_begin
) / 2;
2068 first_half_begin
= current_begin
;
2069 first_half_end
= middle
;
2070 second_half_begin
= middle
;
2071 second_half_end
= current_end
;
2075 first_half_begin
= middle
;
2076 first_half_end
= current_end
;
2077 second_half_begin
= current_begin
;
2078 second_half_end
= middle
;
2081 xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2082 buf
.get () + (first_half_begin
- begin
) * unit_size
,
2084 first_half_end
- first_half_begin
);
2086 if (xfer
== first_half_end
- first_half_begin
)
2088 /* This half reads up fine. So, the error must be in the
2090 current_begin
= second_half_begin
;
2091 current_end
= second_half_end
;
2095 /* This half is not readable. Because we've tried one byte, we
2096 know some part of this half if actually readable. Go to the next
2097 iteration to divide again and try to read.
2099 We don't handle the other half, because this function only tries
2100 to read a single readable subrange. */
2101 current_begin
= first_half_begin
;
2102 current_end
= first_half_end
;
2108 /* The [begin, current_begin) range has been read. */
2109 result
->emplace_back (begin
, current_end
, std::move (buf
));
2113 /* The [current_end, end) range has been read. */
2114 LONGEST region_len
= end
- current_end
;
2116 gdb::unique_xmalloc_ptr
<gdb_byte
> data
2117 ((gdb_byte
*) xmalloc (region_len
* unit_size
));
2118 memcpy (data
.get (), buf
.get () + (current_end
- begin
) * unit_size
,
2119 region_len
* unit_size
);
2120 result
->emplace_back (current_end
, end
, std::move (data
));
2124 std::vector
<memory_read_result
>
2125 read_memory_robust (struct target_ops
*ops
,
2126 const ULONGEST offset
, const LONGEST len
)
2128 std::vector
<memory_read_result
> result
;
2129 int unit_size
= gdbarch_addressable_memory_unit_size (target_gdbarch ());
2131 LONGEST xfered_total
= 0;
2132 while (xfered_total
< len
)
2134 struct mem_region
*region
= lookup_mem_region (offset
+ xfered_total
);
2137 /* If there is no explicit region, a fake one should be created. */
2138 gdb_assert (region
);
2140 if (region
->hi
== 0)
2141 region_len
= len
- xfered_total
;
2143 region_len
= region
->hi
- offset
;
2145 if (region
->attrib
.mode
== MEM_NONE
|| region
->attrib
.mode
== MEM_WO
)
2147 /* Cannot read this region. Note that we can end up here only
2148 if the region is explicitly marked inaccessible, or
2149 'inaccessible-by-default' is in effect. */
2150 xfered_total
+= region_len
;
2154 LONGEST to_read
= std::min (len
- xfered_total
, region_len
);
2155 gdb::unique_xmalloc_ptr
<gdb_byte
> buffer
2156 ((gdb_byte
*) xmalloc (to_read
* unit_size
));
2158 LONGEST xfered_partial
=
2159 target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
, buffer
.get (),
2160 offset
+ xfered_total
, to_read
);
2161 /* Call an observer, notifying them of the xfer progress? */
2162 if (xfered_partial
<= 0)
2164 /* Got an error reading full chunk. See if maybe we can read
2166 read_whatever_is_readable (ops
, offset
+ xfered_total
,
2167 offset
+ xfered_total
+ to_read
,
2168 unit_size
, &result
);
2169 xfered_total
+= to_read
;
2173 result
.emplace_back (offset
+ xfered_total
,
2174 offset
+ xfered_total
+ xfered_partial
,
2175 std::move (buffer
));
2176 xfered_total
+= xfered_partial
;
2186 /* An alternative to target_write with progress callbacks. */
2189 target_write_with_progress (struct target_ops
*ops
,
2190 enum target_object object
,
2191 const char *annex
, const gdb_byte
*buf
,
2192 ULONGEST offset
, LONGEST len
,
2193 void (*progress
) (ULONGEST
, void *), void *baton
)
2195 LONGEST xfered_total
= 0;
2198 /* If we are writing to a memory object, find the length of an addressable
2199 unit for that architecture. */
2200 if (object
== TARGET_OBJECT_MEMORY
2201 || object
== TARGET_OBJECT_STACK_MEMORY
2202 || object
== TARGET_OBJECT_CODE_MEMORY
2203 || object
== TARGET_OBJECT_RAW_MEMORY
)
2204 unit_size
= gdbarch_addressable_memory_unit_size (target_gdbarch ());
2206 /* Give the progress callback a chance to set up. */
2208 (*progress
) (0, baton
);
2210 while (xfered_total
< len
)
2212 ULONGEST xfered_partial
;
2213 enum target_xfer_status status
;
2215 status
= target_write_partial (ops
, object
, annex
,
2216 buf
+ xfered_total
* unit_size
,
2217 offset
+ xfered_total
, len
- xfered_total
,
2220 if (status
!= TARGET_XFER_OK
)
2221 return status
== TARGET_XFER_EOF
? xfered_total
: TARGET_XFER_E_IO
;
2224 (*progress
) (xfered_partial
, baton
);
2226 xfered_total
+= xfered_partial
;
2232 /* For docs on target_write see target.h. */
2235 target_write (struct target_ops
*ops
,
2236 enum target_object object
,
2237 const char *annex
, const gdb_byte
*buf
,
2238 ULONGEST offset
, LONGEST len
)
2240 return target_write_with_progress (ops
, object
, annex
, buf
, offset
, len
,
2244 /* Help for target_read_alloc and target_read_stralloc. See their comments
2247 template <typename T
>
2248 gdb::optional
<gdb::def_vector
<T
>>
2249 target_read_alloc_1 (struct target_ops
*ops
, enum target_object object
,
2252 gdb::def_vector
<T
> buf
;
2254 const int chunk
= 4096;
2256 /* This function does not have a length parameter; it reads the
2257 entire OBJECT). Also, it doesn't support objects fetched partly
2258 from one target and partly from another (in a different stratum,
2259 e.g. a core file and an executable). Both reasons make it
2260 unsuitable for reading memory. */
2261 gdb_assert (object
!= TARGET_OBJECT_MEMORY
);
2263 /* Start by reading up to 4K at a time. The target will throttle
2264 this number down if necessary. */
2267 ULONGEST xfered_len
;
2268 enum target_xfer_status status
;
2270 buf
.resize (buf_pos
+ chunk
);
2272 status
= target_read_partial (ops
, object
, annex
,
2273 (gdb_byte
*) &buf
[buf_pos
],
2277 if (status
== TARGET_XFER_EOF
)
2279 /* Read all there was. */
2280 buf
.resize (buf_pos
);
2283 else if (status
!= TARGET_XFER_OK
)
2285 /* An error occurred. */
2289 buf_pos
+= xfered_len
;
2297 gdb::optional
<gdb::byte_vector
>
2298 target_read_alloc (struct target_ops
*ops
, enum target_object object
,
2301 return target_read_alloc_1
<gdb_byte
> (ops
, object
, annex
);
2306 gdb::optional
<gdb::char_vector
>
2307 target_read_stralloc (struct target_ops
*ops
, enum target_object object
,
2310 gdb::optional
<gdb::char_vector
> buf
2311 = target_read_alloc_1
<char> (ops
, object
, annex
);
2316 if (buf
->empty () || buf
->back () != '\0')
2317 buf
->push_back ('\0');
2319 /* Check for embedded NUL bytes; but allow trailing NULs. */
2320 for (auto it
= std::find (buf
->begin (), buf
->end (), '\0');
2321 it
!= buf
->end (); it
++)
2324 warning (_("target object %d, annex %s, "
2325 "contained unexpected null characters"),
2326 (int) object
, annex
? annex
: "(none)");
2333 /* Memory transfer methods. */
2336 get_target_memory (struct target_ops
*ops
, CORE_ADDR addr
, gdb_byte
*buf
,
2339 /* This method is used to read from an alternate, non-current
2340 target. This read must bypass the overlay support (as symbols
2341 don't match this target), and GDB's internal cache (wrong cache
2342 for this target). */
2343 if (target_read (ops
, TARGET_OBJECT_RAW_MEMORY
, NULL
, buf
, addr
, len
)
2345 memory_error (TARGET_XFER_E_IO
, addr
);
2349 get_target_memory_unsigned (struct target_ops
*ops
, CORE_ADDR addr
,
2350 int len
, enum bfd_endian byte_order
)
2352 gdb_byte buf
[sizeof (ULONGEST
)];
2354 gdb_assert (len
<= sizeof (buf
));
2355 get_target_memory (ops
, addr
, buf
, len
);
2356 return extract_unsigned_integer (buf
, len
, byte_order
);
2362 target_insert_breakpoint (struct gdbarch
*gdbarch
,
2363 struct bp_target_info
*bp_tgt
)
2365 if (!may_insert_breakpoints
)
2367 warning (_("May not insert breakpoints"));
2371 target_ops
*target
= current_inferior ()->top_target ();
2373 return target
->insert_breakpoint (gdbarch
, bp_tgt
);
2379 target_remove_breakpoint (struct gdbarch
*gdbarch
,
2380 struct bp_target_info
*bp_tgt
,
2381 enum remove_bp_reason reason
)
2383 /* This is kind of a weird case to handle, but the permission might
2384 have been changed after breakpoints were inserted - in which case
2385 we should just take the user literally and assume that any
2386 breakpoints should be left in place. */
2387 if (!may_insert_breakpoints
)
2389 warning (_("May not remove breakpoints"));
2393 target_ops
*target
= current_inferior ()->top_target ();
2395 return target
->remove_breakpoint (gdbarch
, bp_tgt
, reason
);
2399 info_target_command (const char *args
, int from_tty
)
2401 int has_all_mem
= 0;
2403 if (current_program_space
->symfile_object_file
!= NULL
)
2405 objfile
*objf
= current_program_space
->symfile_object_file
;
2406 gdb_printf (_("Symbols from \"%s\".\n"),
2407 objfile_name (objf
));
2410 for (target_ops
*t
= current_inferior ()->top_target ();
2414 if (!t
->has_memory ())
2417 if ((int) (t
->stratum ()) <= (int) dummy_stratum
)
2420 gdb_printf (_("\tWhile running this, "
2421 "GDB does not access memory from...\n"));
2422 gdb_printf ("%s:\n", t
->longname ());
2424 has_all_mem
= t
->has_all_memory ();
2428 /* This function is called before any new inferior is created, e.g.
2429 by running a program, attaching, or connecting to a target.
2430 It cleans up any state from previous invocations which might
2431 change between runs. This is a subset of what target_preopen
2432 resets (things which might change between targets). */
2435 target_pre_inferior (int from_tty
)
2437 /* Clear out solib state. Otherwise the solib state of the previous
2438 inferior might have survived and is entirely wrong for the new
2439 target. This has been observed on GNU/Linux using glibc 2.3. How
2451 Cannot access memory at address 0xdeadbeef
2454 /* In some OSs, the shared library list is the same/global/shared
2455 across inferiors. If code is shared between processes, so are
2456 memory regions and features. */
2457 if (!gdbarch_has_global_solist (target_gdbarch ()))
2459 no_shared_libraries (NULL
, from_tty
);
2461 invalidate_target_mem_regions ();
2463 target_clear_description ();
2466 /* attach_flag may be set if the previous process associated with
2467 the inferior was attached to. */
2468 current_inferior ()->attach_flag
= false;
2470 current_inferior ()->highest_thread_num
= 0;
2472 update_previous_thread ();
2474 agent_capability_invalidate ();
2477 /* This is to be called by the open routine before it does
2481 target_preopen (int from_tty
)
2485 if (current_inferior ()->pid
!= 0)
2488 || !target_has_execution ()
2489 || query (_("A program is being debugged already. Kill it? ")))
2491 /* Core inferiors actually should be detached, not
2493 if (target_has_execution ())
2496 target_detach (current_inferior (), 0);
2499 error (_("Program not killed."));
2502 /* Release reference to old previous thread. */
2503 update_previous_thread ();
2505 /* Calling target_kill may remove the target from the stack. But if
2506 it doesn't (which seems like a win for UDI), remove it now. */
2507 /* Leave the exec target, though. The user may be switching from a
2508 live process to a core of the same program. */
2509 current_inferior ()->pop_all_targets_above (file_stratum
);
2511 target_pre_inferior (from_tty
);
2517 target_detach (inferior
*inf
, int from_tty
)
2519 /* Thread's don't need to be resumed until the end of this function. */
2520 scoped_disable_commit_resumed
disable_commit_resumed ("detaching");
2522 /* After we have detached, we will clear the register cache for this inferior
2523 by calling registers_changed_ptid. We must save the pid_ptid before
2524 detaching, as the target detach method will clear inf->pid. */
2525 ptid_t save_pid_ptid
= ptid_t (inf
->pid
);
2527 /* As long as some to_detach implementations rely on the current_inferior
2528 (either directly, or indirectly, like through target_gdbarch or by
2529 reading memory), INF needs to be the current inferior. When that
2530 requirement will become no longer true, then we can remove this
2532 gdb_assert (inf
== current_inferior ());
2534 prepare_for_detach ();
2536 gdb::observers::inferior_pre_detach
.notify (inf
);
2538 /* Hold a strong reference because detaching may unpush the
2540 auto proc_target_ref
= target_ops_ref::new_reference (inf
->process_target ());
2542 current_inferior ()->top_target ()->detach (inf
, from_tty
);
2544 process_stratum_target
*proc_target
2545 = as_process_stratum_target (proc_target_ref
.get ());
2547 registers_changed_ptid (proc_target
, save_pid_ptid
);
2549 /* We have to ensure we have no frame cache left. Normally,
2550 registers_changed_ptid (save_pid_ptid) calls reinit_frame_cache when
2551 inferior_ptid matches save_pid_ptid, but in our case, it does not
2552 call it, as inferior_ptid has been reset. */
2553 reinit_frame_cache ();
2555 disable_commit_resumed
.reset_and_commit ();
2559 target_disconnect (const char *args
, int from_tty
)
2561 /* If we're in breakpoints-always-inserted mode or if breakpoints
2562 are global across processes, we have to remove them before
2564 remove_breakpoints ();
2566 current_inferior ()->top_target ()->disconnect (args
, from_tty
);
2569 /* See target/target.h. */
2572 target_wait (ptid_t ptid
, struct target_waitstatus
*status
,
2573 target_wait_flags options
)
2575 target_ops
*target
= current_inferior ()->top_target ();
2576 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
2578 gdb_assert (!proc_target
->commit_resumed_state
);
2580 if (!target_can_async_p (target
))
2581 gdb_assert ((options
& TARGET_WNOHANG
) == 0);
2585 gdb::observers::target_pre_wait
.notify (ptid
);
2586 ptid_t event_ptid
= target
->wait (ptid
, status
, options
);
2587 gdb::observers::target_post_wait
.notify (event_ptid
);
2592 gdb::observers::target_post_wait
.notify (null_ptid
);
2600 default_target_wait (struct target_ops
*ops
,
2601 ptid_t ptid
, struct target_waitstatus
*status
,
2602 target_wait_flags options
)
2604 status
->set_ignore ();
2605 return minus_one_ptid
;
2609 target_pid_to_str (ptid_t ptid
)
2611 return current_inferior ()->top_target ()->pid_to_str (ptid
);
2615 target_thread_name (struct thread_info
*info
)
2617 gdb_assert (info
->inf
== current_inferior ());
2619 return current_inferior ()->top_target ()->thread_name (info
);
2622 struct thread_info
*
2623 target_thread_handle_to_thread_info (const gdb_byte
*thread_handle
,
2625 struct inferior
*inf
)
2627 target_ops
*target
= current_inferior ()->top_target ();
2629 return target
->thread_handle_to_thread_info (thread_handle
, handle_len
, inf
);
2635 target_thread_info_to_thread_handle (struct thread_info
*tip
)
2637 target_ops
*target
= current_inferior ()->top_target ();
2639 return target
->thread_info_to_thread_handle (tip
);
2643 target_resume (ptid_t scope_ptid
, int step
, enum gdb_signal signal
)
2645 process_stratum_target
*curr_target
= current_inferior ()->process_target ();
2646 gdb_assert (!curr_target
->commit_resumed_state
);
2648 gdb_assert (inferior_ptid
!= null_ptid
);
2649 gdb_assert (inferior_ptid
.matches (scope_ptid
));
2651 target_dcache_invalidate ();
2653 current_inferior ()->top_target ()->resume (scope_ptid
, step
, signal
);
2655 registers_changed_ptid (curr_target
, scope_ptid
);
2656 /* We only set the internal executing state here. The user/frontend
2657 running state is set at a higher level. This also clears the
2658 thread's stop_pc as side effect. */
2659 set_executing (curr_target
, scope_ptid
, true);
2660 clear_inline_frame_state (curr_target
, scope_ptid
);
2662 if (target_can_async_p ())
2663 target_async (true);
2669 target_commit_resumed ()
2671 gdb_assert (current_inferior ()->process_target ()->commit_resumed_state
);
2672 current_inferior ()->top_target ()->commit_resumed ();
2678 target_has_pending_events ()
2680 return current_inferior ()->top_target ()->has_pending_events ();
2684 target_pass_signals (gdb::array_view
<const unsigned char> pass_signals
)
2686 current_inferior ()->top_target ()->pass_signals (pass_signals
);
2690 target_program_signals (gdb::array_view
<const unsigned char> program_signals
)
2692 current_inferior ()->top_target ()->program_signals (program_signals
);
2696 default_follow_fork (struct target_ops
*self
, inferior
*child_inf
,
2697 ptid_t child_ptid
, target_waitkind fork_kind
,
2698 bool follow_child
, bool detach_fork
)
2700 /* Some target returned a fork event, but did not know how to follow it. */
2701 internal_error (_("could not find a target to follow fork"));
2707 target_follow_fork (inferior
*child_inf
, ptid_t child_ptid
,
2708 target_waitkind fork_kind
, bool follow_child
,
2711 target_ops
*target
= current_inferior ()->top_target ();
2713 /* Check consistency between CHILD_INF, CHILD_PTID, FOLLOW_CHILD and
2715 if (child_inf
!= nullptr)
2717 gdb_assert (follow_child
|| !detach_fork
);
2718 gdb_assert (child_inf
->pid
== child_ptid
.pid ());
2721 gdb_assert (!follow_child
&& detach_fork
);
2723 return target
->follow_fork (child_inf
, child_ptid
, fork_kind
, follow_child
,
2730 target_follow_exec (inferior
*follow_inf
, ptid_t ptid
,
2731 const char *execd_pathname
)
2733 current_inferior ()->top_target ()->follow_exec (follow_inf
, ptid
,
2738 default_mourn_inferior (struct target_ops
*self
)
2740 internal_error (_("could not find a target to follow mourn inferior"));
2744 target_mourn_inferior (ptid_t ptid
)
2746 gdb_assert (ptid
.pid () == inferior_ptid
.pid ());
2747 current_inferior ()->top_target ()->mourn_inferior ();
2749 /* We no longer need to keep handles on any of the object files.
2750 Make sure to release them to avoid unnecessarily locking any
2751 of them while we're not actually debugging. */
2752 bfd_cache_close_all ();
2755 /* Look for a target which can describe architectural features, starting
2756 from TARGET. If we find one, return its description. */
2758 const struct target_desc
*
2759 target_read_description (struct target_ops
*target
)
2761 return target
->read_description ();
2765 /* Default implementation of memory-searching. */
2768 default_search_memory (struct target_ops
*self
,
2769 CORE_ADDR start_addr
, ULONGEST search_space_len
,
2770 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2771 CORE_ADDR
*found_addrp
)
2773 auto read_memory
= [=] (CORE_ADDR addr
, gdb_byte
*result
, size_t len
)
2775 return target_read (current_inferior ()->top_target (),
2776 TARGET_OBJECT_MEMORY
, NULL
,
2777 result
, addr
, len
) == len
;
2780 /* Start over from the top of the target stack. */
2781 return simple_search_memory (read_memory
, start_addr
, search_space_len
,
2782 pattern
, pattern_len
, found_addrp
);
2785 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2786 sequence of bytes in PATTERN with length PATTERN_LEN.
2788 The result is 1 if found, 0 if not found, and -1 if there was an error
2789 requiring halting of the search (e.g. memory read error).
2790 If the pattern is found the address is recorded in FOUND_ADDRP. */
2793 target_search_memory (CORE_ADDR start_addr
, ULONGEST search_space_len
,
2794 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2795 CORE_ADDR
*found_addrp
)
2797 target_ops
*target
= current_inferior ()->top_target ();
2799 return target
->search_memory (start_addr
, search_space_len
, pattern
,
2800 pattern_len
, found_addrp
);
2803 /* Look through the currently pushed targets. If none of them will
2804 be able to restart the currently running process, issue an error
2808 target_require_runnable (void)
2810 for (target_ops
*t
= current_inferior ()->top_target ();
2814 /* If this target knows how to create a new program, then
2815 assume we will still be able to after killing the current
2816 one. Either killing and mourning will not pop T, or else
2817 find_default_run_target will find it again. */
2818 if (t
->can_create_inferior ())
2821 /* Do not worry about targets at certain strata that can not
2822 create inferiors. Assume they will be pushed again if
2823 necessary, and continue to the process_stratum. */
2824 if (t
->stratum () > process_stratum
)
2827 error (_("The \"%s\" target does not support \"run\". "
2828 "Try \"help target\" or \"continue\"."),
2832 /* This function is only called if the target is running. In that
2833 case there should have been a process_stratum target and it
2834 should either know how to create inferiors, or not... */
2835 internal_error (_("No targets found"));
2838 /* Whether GDB is allowed to fall back to the default run target for
2839 "run", "attach", etc. when no target is connected yet. */
2840 static bool auto_connect_native_target
= true;
2843 show_auto_connect_native_target (struct ui_file
*file
, int from_tty
,
2844 struct cmd_list_element
*c
, const char *value
)
2847 _("Whether GDB may automatically connect to the "
2848 "native target is %s.\n"),
2852 /* A pointer to the target that can respond to "run" or "attach".
2853 Native targets are always singletons and instantiated early at GDB
2855 static target_ops
*the_native_target
;
2860 set_native_target (target_ops
*target
)
2862 if (the_native_target
!= NULL
)
2863 internal_error (_("native target already set (\"%s\")."),
2864 the_native_target
->longname ());
2866 the_native_target
= target
;
2872 get_native_target ()
2874 return the_native_target
;
2877 /* Look through the list of possible targets for a target that can
2878 execute a run or attach command without any other data. This is
2879 used to locate the default process stratum.
2881 If DO_MESG is not NULL, the result is always valid (error() is
2882 called for errors); else, return NULL on error. */
2884 static struct target_ops
*
2885 find_default_run_target (const char *do_mesg
)
2887 if (auto_connect_native_target
&& the_native_target
!= NULL
)
2888 return the_native_target
;
2890 if (do_mesg
!= NULL
)
2891 error (_("Don't know how to %s. Try \"help target\"."), do_mesg
);
2898 find_attach_target (void)
2900 /* If a target on the current stack can attach, use it. */
2901 for (target_ops
*t
= current_inferior ()->top_target ();
2905 if (t
->can_attach ())
2909 /* Otherwise, use the default run target for attaching. */
2910 return find_default_run_target ("attach");
2916 find_run_target (void)
2918 /* If a target on the current stack can run, use it. */
2919 for (target_ops
*t
= current_inferior ()->top_target ();
2923 if (t
->can_create_inferior ())
2927 /* Otherwise, use the default run target. */
2928 return find_default_run_target ("run");
2932 target_ops::info_proc (const char *args
, enum info_proc_what what
)
2937 /* Implement the "info proc" command. */
2940 target_info_proc (const char *args
, enum info_proc_what what
)
2942 struct target_ops
*t
;
2944 /* If we're already connected to something that can get us OS
2945 related data, use it. Otherwise, try using the native
2947 t
= find_target_at (process_stratum
);
2949 t
= find_default_run_target (NULL
);
2951 for (; t
!= NULL
; t
= t
->beneath ())
2953 if (t
->info_proc (args
, what
))
2956 gdb_printf (gdb_stdlog
,
2957 "target_info_proc (\"%s\", %d)\n", args
, what
);
2967 find_default_supports_disable_randomization (struct target_ops
*self
)
2969 struct target_ops
*t
;
2971 t
= find_default_run_target (NULL
);
2973 return t
->supports_disable_randomization ();
2978 target_supports_disable_randomization (void)
2980 return current_inferior ()->top_target ()->supports_disable_randomization ();
2983 /* See target/target.h. */
2986 target_supports_multi_process (void)
2988 return current_inferior ()->top_target ()->supports_multi_process ();
2993 gdb::optional
<gdb::char_vector
>
2994 target_get_osdata (const char *type
)
2996 struct target_ops
*t
;
2998 /* If we're already connected to something that can get us OS
2999 related data, use it. Otherwise, try using the native
3001 t
= find_target_at (process_stratum
);
3003 t
= find_default_run_target ("get OS data");
3008 return target_read_stralloc (t
, TARGET_OBJECT_OSDATA
, type
);
3011 /* Determine the current address space of thread PTID. */
3013 struct address_space
*
3014 target_thread_address_space (ptid_t ptid
)
3016 struct address_space
*aspace
;
3018 aspace
= current_inferior ()->top_target ()->thread_address_space (ptid
);
3019 gdb_assert (aspace
!= NULL
);
3027 target_ops::beneath () const
3029 return current_inferior ()->find_target_beneath (this);
3033 target_ops::close ()
3038 target_ops::can_attach ()
3044 target_ops::attach (const char *, int)
3046 gdb_assert_not_reached ("target_ops::attach called");
3050 target_ops::can_create_inferior ()
3056 target_ops::create_inferior (const char *, const std::string
&,
3059 gdb_assert_not_reached ("target_ops::create_inferior called");
3063 target_ops::can_run ()
3071 for (target_ops
*t
= current_inferior ()->top_target ();
3082 /* Target file operations. */
3084 static struct target_ops
*
3085 default_fileio_target (void)
3087 struct target_ops
*t
;
3089 /* If we're already connected to something that can perform
3090 file I/O, use it. Otherwise, try using the native target. */
3091 t
= find_target_at (process_stratum
);
3094 return find_default_run_target ("file I/O");
3097 /* File handle for target file operations. */
3101 /* The target on which this file is open. NULL if the target is
3102 meanwhile closed while the handle is open. */
3105 /* The file descriptor on the target. */
3108 /* Check whether this fileio_fh_t represents a closed file. */
3111 return target_fd
< 0;
3115 /* Vector of currently open file handles. The value returned by
3116 target_fileio_open and passed as the FD argument to other
3117 target_fileio_* functions is an index into this vector. This
3118 vector's entries are never freed; instead, files are marked as
3119 closed, and the handle becomes available for reuse. */
3120 static std::vector
<fileio_fh_t
> fileio_fhandles
;
3122 /* Index into fileio_fhandles of the lowest handle that might be
3123 closed. This permits handle reuse without searching the whole
3124 list each time a new file is opened. */
3125 static int lowest_closed_fd
;
3130 fileio_handles_invalidate_target (target_ops
*targ
)
3132 for (fileio_fh_t
&fh
: fileio_fhandles
)
3133 if (fh
.target
== targ
)
3137 /* Acquire a target fileio file descriptor. */
3140 acquire_fileio_fd (target_ops
*target
, int target_fd
)
3142 /* Search for closed handles to reuse. */
3143 for (; lowest_closed_fd
< fileio_fhandles
.size (); lowest_closed_fd
++)
3145 fileio_fh_t
&fh
= fileio_fhandles
[lowest_closed_fd
];
3147 if (fh
.is_closed ())
3151 /* Push a new handle if no closed handles were found. */
3152 if (lowest_closed_fd
== fileio_fhandles
.size ())
3153 fileio_fhandles
.push_back (fileio_fh_t
{target
, target_fd
});
3155 fileio_fhandles
[lowest_closed_fd
] = {target
, target_fd
};
3157 /* Should no longer be marked closed. */
3158 gdb_assert (!fileio_fhandles
[lowest_closed_fd
].is_closed ());
3160 /* Return its index, and start the next lookup at
3162 return lowest_closed_fd
++;
3165 /* Release a target fileio file descriptor. */
3168 release_fileio_fd (int fd
, fileio_fh_t
*fh
)
3171 lowest_closed_fd
= std::min (lowest_closed_fd
, fd
);
3174 /* Return a pointer to the fileio_fhandle_t corresponding to FD. */
3176 static fileio_fh_t
*
3177 fileio_fd_to_fh (int fd
)
3179 return &fileio_fhandles
[fd
];
3183 /* Default implementations of file i/o methods. We don't want these
3184 to delegate automatically, because we need to know which target
3185 supported the method, in order to call it directly from within
3186 pread/pwrite, etc. */
3189 target_ops::fileio_open (struct inferior
*inf
, const char *filename
,
3190 int flags
, int mode
, int warn_if_slow
,
3191 fileio_error
*target_errno
)
3193 *target_errno
= FILEIO_ENOSYS
;
3198 target_ops::fileio_pwrite (int fd
, const gdb_byte
*write_buf
, int len
,
3199 ULONGEST offset
, fileio_error
*target_errno
)
3201 *target_errno
= FILEIO_ENOSYS
;
3206 target_ops::fileio_pread (int fd
, gdb_byte
*read_buf
, int len
,
3207 ULONGEST offset
, fileio_error
*target_errno
)
3209 *target_errno
= FILEIO_ENOSYS
;
3214 target_ops::fileio_fstat (int fd
, struct stat
*sb
, fileio_error
*target_errno
)
3216 *target_errno
= FILEIO_ENOSYS
;
3221 target_ops::fileio_close (int fd
, fileio_error
*target_errno
)
3223 *target_errno
= FILEIO_ENOSYS
;
3228 target_ops::fileio_unlink (struct inferior
*inf
, const char *filename
,
3229 fileio_error
*target_errno
)
3231 *target_errno
= FILEIO_ENOSYS
;
3235 gdb::optional
<std::string
>
3236 target_ops::fileio_readlink (struct inferior
*inf
, const char *filename
,
3237 fileio_error
*target_errno
)
3239 *target_errno
= FILEIO_ENOSYS
;
3246 target_fileio_open (struct inferior
*inf
, const char *filename
,
3247 int flags
, int mode
, bool warn_if_slow
, fileio_error
*target_errno
)
3249 for (target_ops
*t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath ())
3251 int fd
= t
->fileio_open (inf
, filename
, flags
, mode
,
3252 warn_if_slow
, target_errno
);
3254 if (fd
== -1 && *target_errno
== FILEIO_ENOSYS
)
3260 fd
= acquire_fileio_fd (t
, fd
);
3263 gdb_printf (gdb_stdlog
,
3264 "target_fileio_open (%d,%s,0x%x,0%o,%d)"
3266 inf
== NULL
? 0 : inf
->num
,
3267 filename
, flags
, mode
,
3269 fd
!= -1 ? 0 : *target_errno
);
3273 *target_errno
= FILEIO_ENOSYS
;
3280 target_fileio_pwrite (int fd
, const gdb_byte
*write_buf
, int len
,
3281 ULONGEST offset
, fileio_error
*target_errno
)
3283 fileio_fh_t
*fh
= fileio_fd_to_fh (fd
);
3286 if (fh
->is_closed ())
3287 *target_errno
= FILEIO_EBADF
;
3288 else if (fh
->target
== NULL
)
3289 *target_errno
= FILEIO_EIO
;
3291 ret
= fh
->target
->fileio_pwrite (fh
->target_fd
, write_buf
,
3292 len
, offset
, target_errno
);
3295 gdb_printf (gdb_stdlog
,
3296 "target_fileio_pwrite (%d,...,%d,%s) "
3298 fd
, len
, pulongest (offset
),
3299 ret
, ret
!= -1 ? 0 : *target_errno
);
3306 target_fileio_pread (int fd
, gdb_byte
*read_buf
, int len
,
3307 ULONGEST offset
, fileio_error
*target_errno
)
3309 fileio_fh_t
*fh
= fileio_fd_to_fh (fd
);
3312 if (fh
->is_closed ())
3313 *target_errno
= FILEIO_EBADF
;
3314 else if (fh
->target
== NULL
)
3315 *target_errno
= FILEIO_EIO
;
3317 ret
= fh
->target
->fileio_pread (fh
->target_fd
, read_buf
,
3318 len
, offset
, target_errno
);
3321 gdb_printf (gdb_stdlog
,
3322 "target_fileio_pread (%d,...,%d,%s) "
3324 fd
, len
, pulongest (offset
),
3325 ret
, ret
!= -1 ? 0 : *target_errno
);
3332 target_fileio_fstat (int fd
, struct stat
*sb
, fileio_error
*target_errno
)
3334 fileio_fh_t
*fh
= fileio_fd_to_fh (fd
);
3337 if (fh
->is_closed ())
3338 *target_errno
= FILEIO_EBADF
;
3339 else if (fh
->target
== NULL
)
3340 *target_errno
= FILEIO_EIO
;
3342 ret
= fh
->target
->fileio_fstat (fh
->target_fd
, sb
, target_errno
);
3345 gdb_printf (gdb_stdlog
,
3346 "target_fileio_fstat (%d) = %d (%d)\n",
3347 fd
, ret
, ret
!= -1 ? 0 : *target_errno
);
3354 target_fileio_close (int fd
, fileio_error
*target_errno
)
3356 fileio_fh_t
*fh
= fileio_fd_to_fh (fd
);
3359 if (fh
->is_closed ())
3360 *target_errno
= FILEIO_EBADF
;
3363 if (fh
->target
!= NULL
)
3364 ret
= fh
->target
->fileio_close (fh
->target_fd
,
3368 release_fileio_fd (fd
, fh
);
3372 gdb_printf (gdb_stdlog
,
3373 "target_fileio_close (%d) = %d (%d)\n",
3374 fd
, ret
, ret
!= -1 ? 0 : *target_errno
);
3381 target_fileio_unlink (struct inferior
*inf
, const char *filename
,
3382 fileio_error
*target_errno
)
3384 for (target_ops
*t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath ())
3386 int ret
= t
->fileio_unlink (inf
, filename
, target_errno
);
3388 if (ret
== -1 && *target_errno
== FILEIO_ENOSYS
)
3392 gdb_printf (gdb_stdlog
,
3393 "target_fileio_unlink (%d,%s)"
3395 inf
== NULL
? 0 : inf
->num
, filename
,
3396 ret
, ret
!= -1 ? 0 : *target_errno
);
3400 *target_errno
= FILEIO_ENOSYS
;
3406 gdb::optional
<std::string
>
3407 target_fileio_readlink (struct inferior
*inf
, const char *filename
,
3408 fileio_error
*target_errno
)
3410 for (target_ops
*t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath ())
3412 gdb::optional
<std::string
> ret
3413 = t
->fileio_readlink (inf
, filename
, target_errno
);
3415 if (!ret
.has_value () && *target_errno
== FILEIO_ENOSYS
)
3419 gdb_printf (gdb_stdlog
,
3420 "target_fileio_readlink (%d,%s)"
3422 inf
== NULL
? 0 : inf
->num
,
3423 filename
, ret
? ret
->c_str () : "(nil)",
3424 ret
? 0 : *target_errno
);
3428 *target_errno
= FILEIO_ENOSYS
;
3432 /* Like scoped_fd, but specific to target fileio. */
3434 class scoped_target_fd
3437 explicit scoped_target_fd (int fd
) noexcept
3442 ~scoped_target_fd ()
3446 fileio_error target_errno
;
3448 target_fileio_close (m_fd
, &target_errno
);
3452 DISABLE_COPY_AND_ASSIGN (scoped_target_fd
);
3454 int get () const noexcept
3463 /* Read target file FILENAME, in the filesystem as seen by INF. If
3464 INF is NULL, use the filesystem seen by the debugger (GDB or, for
3465 remote targets, the remote stub). Store the result in *BUF_P and
3466 return the size of the transferred data. PADDING additional bytes
3467 are available in *BUF_P. This is a helper function for
3468 target_fileio_read_alloc; see the declaration of that function for
3469 more information. */
3472 target_fileio_read_alloc_1 (struct inferior
*inf
, const char *filename
,
3473 gdb_byte
**buf_p
, int padding
)
3475 size_t buf_alloc
, buf_pos
;
3478 fileio_error target_errno
;
3480 scoped_target_fd
fd (target_fileio_open (inf
, filename
, FILEIO_O_RDONLY
,
3481 0700, false, &target_errno
));
3482 if (fd
.get () == -1)
3485 /* Start by reading up to 4K at a time. The target will throttle
3486 this number down if necessary. */
3488 buf
= (gdb_byte
*) xmalloc (buf_alloc
);
3492 n
= target_fileio_pread (fd
.get (), &buf
[buf_pos
],
3493 buf_alloc
- buf_pos
- padding
, buf_pos
,
3497 /* An error occurred. */
3503 /* Read all there was. */
3513 /* If the buffer is filling up, expand it. */
3514 if (buf_alloc
< buf_pos
* 2)
3517 buf
= (gdb_byte
*) xrealloc (buf
, buf_alloc
);
3527 target_fileio_read_alloc (struct inferior
*inf
, const char *filename
,
3530 return target_fileio_read_alloc_1 (inf
, filename
, buf_p
, 0);
3535 gdb::unique_xmalloc_ptr
<char>
3536 target_fileio_read_stralloc (struct inferior
*inf
, const char *filename
)
3540 LONGEST i
, transferred
;
3542 transferred
= target_fileio_read_alloc_1 (inf
, filename
, &buffer
, 1);
3543 bufstr
= (char *) buffer
;
3545 if (transferred
< 0)
3546 return gdb::unique_xmalloc_ptr
<char> (nullptr);
3548 if (transferred
== 0)
3549 return make_unique_xstrdup ("");
3551 bufstr
[transferred
] = 0;
3553 /* Check for embedded NUL bytes; but allow trailing NULs. */
3554 for (i
= strlen (bufstr
); i
< transferred
; i
++)
3557 warning (_("target file %s "
3558 "contained unexpected null characters"),
3563 return gdb::unique_xmalloc_ptr
<char> (bufstr
);
3568 default_region_ok_for_hw_watchpoint (struct target_ops
*self
,
3569 CORE_ADDR addr
, int len
)
3571 return (len
<= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT
);
3575 default_watchpoint_addr_within_range (struct target_ops
*target
,
3577 CORE_ADDR start
, int length
)
3579 return addr
>= start
&& addr
< start
+ length
;
3585 target_stack::find_beneath (const target_ops
*t
) const
3587 /* Look for a non-empty slot at stratum levels beneath T's. */
3588 for (int stratum
= t
->stratum () - 1; stratum
>= 0; --stratum
)
3589 if (m_stack
[stratum
].get () != NULL
)
3590 return m_stack
[stratum
].get ();
3598 find_target_at (enum strata stratum
)
3600 return current_inferior ()->target_at (stratum
);
3608 target_announce_detach (int from_tty
)
3611 const char *exec_file
;
3616 pid
= inferior_ptid
.pid ();
3617 exec_file
= get_exec_file (0);
3618 if (exec_file
== nullptr)
3619 gdb_printf ("Detaching from pid %s\n",
3620 target_pid_to_str (ptid_t (pid
)).c_str ());
3622 gdb_printf (_("Detaching from program: %s, %s\n"), exec_file
,
3623 target_pid_to_str (ptid_t (pid
)).c_str ());
3629 target_announce_attach (int from_tty
, int pid
)
3634 const char *exec_file
= get_exec_file (0);
3636 if (exec_file
!= nullptr)
3637 gdb_printf ("Attaching to program: %s, %s\n", exec_file
,
3638 target_pid_to_str (ptid_t (pid
)).c_str ());
3640 gdb_printf ("Attaching to %s\n",
3641 target_pid_to_str (ptid_t (pid
)).c_str ());
3644 /* The inferior process has died. Long live the inferior! */
3647 generic_mourn_inferior (void)
3649 inferior
*inf
= current_inferior ();
3651 switch_to_no_thread ();
3653 /* Mark breakpoints uninserted in case something tries to delete a
3654 breakpoint while we delete the inferior's threads (which would
3655 fail, since the inferior is long gone). */
3656 mark_breakpoints_out ();
3659 exit_inferior (inf
);
3661 /* Note this wipes step-resume breakpoints, so needs to be done
3662 after exit_inferior, which ends up referencing the step-resume
3663 breakpoints through clear_thread_inferior_resources. */
3664 breakpoint_init_inferior (inf_exited
);
3666 registers_changed ();
3668 reopen_exec_file ();
3669 reinit_frame_cache ();
3671 if (deprecated_detach_hook
)
3672 deprecated_detach_hook ();
3675 /* Convert a normal process ID to a string. Returns the string in a
3679 normal_pid_to_str (ptid_t ptid
)
3681 return string_printf ("process %d", ptid
.pid ());
3685 default_pid_to_str (struct target_ops
*ops
, ptid_t ptid
)
3687 return normal_pid_to_str (ptid
);
3690 /* Error-catcher for target_find_memory_regions. */
3692 dummy_find_memory_regions (struct target_ops
*self
,
3693 find_memory_region_ftype ignore1
, void *ignore2
)
3695 error (_("Command not implemented for this target."));
3699 /* Error-catcher for target_make_corefile_notes. */
3700 static gdb::unique_xmalloc_ptr
<char>
3701 dummy_make_corefile_notes (struct target_ops
*self
,
3702 bfd
*ignore1
, int *ignore2
)
3704 error (_("Command not implemented for this target."));
3708 #include "target-delegates.c"
3710 /* The initial current target, so that there is always a semi-valid
3713 static dummy_target the_dummy_target
;
3720 return &the_dummy_target
;
3723 static const target_info dummy_target_info
= {
3730 dummy_target::stratum () const
3732 return dummy_stratum
;
3736 debug_target::stratum () const
3738 return debug_stratum
;
3742 dummy_target::info () const
3744 return dummy_target_info
;
3748 debug_target::info () const
3750 return beneath ()->info ();
3756 target_close (struct target_ops
*targ
)
3758 for (inferior
*inf
: all_inferiors ())
3759 gdb_assert (!inf
->target_is_pushed (targ
));
3761 fileio_handles_invalidate_target (targ
);
3766 gdb_printf (gdb_stdlog
, "target_close ()\n");
3770 target_thread_alive (ptid_t ptid
)
3772 return current_inferior ()->top_target ()->thread_alive (ptid
);
3776 target_update_thread_list (void)
3778 current_inferior ()->top_target ()->update_thread_list ();
3782 target_stop (ptid_t ptid
)
3784 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
3786 gdb_assert (!proc_target
->commit_resumed_state
);
3790 warning (_("May not interrupt or stop the target, ignoring attempt"));
3794 current_inferior ()->top_target ()->stop (ptid
);
3802 warning (_("May not interrupt or stop the target, ignoring attempt"));
3806 current_inferior ()->top_target ()->interrupt ();
3812 target_pass_ctrlc (void)
3814 /* Pass the Ctrl-C to the first target that has a thread
3816 for (inferior
*inf
: all_inferiors ())
3818 target_ops
*proc_target
= inf
->process_target ();
3819 if (proc_target
== NULL
)
3822 for (thread_info
*thr
: inf
->non_exited_threads ())
3824 /* A thread can be THREAD_STOPPED and executing, while
3825 running an infcall. */
3826 if (thr
->state
== THREAD_RUNNING
|| thr
->executing ())
3828 /* We can get here quite deep in target layers. Avoid
3829 switching thread context or anything that would
3830 communicate with the target (e.g., to fetch
3831 registers), or flushing e.g., the frame cache. We
3832 just switch inferior in order to be able to call
3833 through the target_stack. */
3834 scoped_restore_current_inferior restore_inferior
;
3835 set_current_inferior (inf
);
3836 current_inferior ()->top_target ()->pass_ctrlc ();
3846 default_target_pass_ctrlc (struct target_ops
*ops
)
3848 target_interrupt ();
3851 /* See target/target.h. */
3854 target_stop_and_wait (ptid_t ptid
)
3856 struct target_waitstatus status
;
3857 bool was_non_stop
= non_stop
;
3862 target_wait (ptid
, &status
, 0);
3864 non_stop
= was_non_stop
;
3867 /* See target/target.h. */
3870 target_continue_no_signal (ptid_t ptid
)
3872 target_resume (ptid
, 0, GDB_SIGNAL_0
);
3875 /* See target/target.h. */
3878 target_continue (ptid_t ptid
, enum gdb_signal signal
)
3880 target_resume (ptid
, 0, signal
);
/* Concatenate ELEM to LIST, a comma-separated list.  */

static void
str_comma_list_concat_elem (std::string *list, const char *elem)
{
  /* Prefix a separator only once the list is non-empty.  */
  const char *sep = list->empty () ? "" : ", ";
  list->append (sep).append (elem);
}
3894 /* Helper for target_options_to_string. If OPT is present in
3895 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3896 OPT is removed from TARGET_OPTIONS. */
3899 do_option (target_wait_flags
*target_options
, std::string
*ret
,
3900 target_wait_flag opt
, const char *opt_str
)
3902 if ((*target_options
& opt
) != 0)
3904 str_comma_list_concat_elem (ret
, opt_str
);
3905 *target_options
&= ~opt
;
3912 target_options_to_string (target_wait_flags target_options
)
3916 #define DO_TARG_OPTION(OPT) \
3917 do_option (&target_options, &ret, OPT, #OPT)
3919 DO_TARG_OPTION (TARGET_WNOHANG
);
3921 if (target_options
!= 0)
3922 str_comma_list_concat_elem (&ret
, "unknown???");
3928 target_fetch_registers (struct regcache
*regcache
, int regno
)
3930 current_inferior ()->top_target ()->fetch_registers (regcache
, regno
);
3932 regcache
->debug_print_register ("target_fetch_registers", regno
);
3936 target_store_registers (struct regcache
*regcache
, int regno
)
3938 if (!may_write_registers
)
3939 error (_("Writing to registers is not allowed (regno %d)"), regno
);
3941 current_inferior ()->top_target ()->store_registers (regcache
, regno
);
3944 regcache
->debug_print_register ("target_store_registers", regno
);
3949 target_core_of_thread (ptid_t ptid
)
3951 return current_inferior ()->top_target ()->core_of_thread (ptid
);
3955 simple_verify_memory (struct target_ops
*ops
,
3956 const gdb_byte
*data
, CORE_ADDR lma
, ULONGEST size
)
3958 LONGEST total_xfered
= 0;
3960 while (total_xfered
< size
)
3962 ULONGEST xfered_len
;
3963 enum target_xfer_status status
;
3965 ULONGEST howmuch
= std::min
<ULONGEST
> (sizeof (buf
), size
- total_xfered
);
3967 status
= target_xfer_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
3968 buf
, NULL
, lma
+ total_xfered
, howmuch
,
3970 if (status
== TARGET_XFER_OK
3971 && memcmp (data
+ total_xfered
, buf
, xfered_len
) == 0)
3973 total_xfered
+= xfered_len
;
3982 /* Default implementation of memory verification. */
3985 default_verify_memory (struct target_ops
*self
,
3986 const gdb_byte
*data
, CORE_ADDR memaddr
, ULONGEST size
)
3988 /* Start over from the top of the target stack. */
3989 return simple_verify_memory (current_inferior ()->top_target (),
3990 data
, memaddr
, size
);
3994 target_verify_memory (const gdb_byte
*data
, CORE_ADDR memaddr
, ULONGEST size
)
3996 target_ops
*target
= current_inferior ()->top_target ();
3998 return target
->verify_memory (data
, memaddr
, size
);
4001 /* The documentation for this function is in its prototype declaration in
4005 target_insert_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
,
4006 enum target_hw_bp_type rw
)
4008 target_ops
*target
= current_inferior ()->top_target ();
4010 return target
->insert_mask_watchpoint (addr
, mask
, rw
);
4013 /* The documentation for this function is in its prototype declaration in
4017 target_remove_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
,
4018 enum target_hw_bp_type rw
)
4020 target_ops
*target
= current_inferior ()->top_target ();
4022 return target
->remove_mask_watchpoint (addr
, mask
, rw
);
4025 /* The documentation for this function is in its prototype declaration
4029 target_masked_watch_num_registers (CORE_ADDR addr
, CORE_ADDR mask
)
4031 target_ops
*target
= current_inferior ()->top_target ();
4033 return target
->masked_watch_num_registers (addr
, mask
);
4036 /* The documentation for this function is in its prototype declaration
4040 target_ranged_break_num_registers (void)
4042 return current_inferior ()->top_target ()->ranged_break_num_registers ();
4047 struct btrace_target_info
*
4048 target_enable_btrace (thread_info
*tp
, const struct btrace_config
*conf
)
4050 return current_inferior ()->top_target ()->enable_btrace (tp
, conf
);
4056 target_disable_btrace (struct btrace_target_info
*btinfo
)
4058 current_inferior ()->top_target ()->disable_btrace (btinfo
);
4064 target_teardown_btrace (struct btrace_target_info
*btinfo
)
4066 current_inferior ()->top_target ()->teardown_btrace (btinfo
);
4072 target_read_btrace (struct btrace_data
*btrace
,
4073 struct btrace_target_info
*btinfo
,
4074 enum btrace_read_type type
)
4076 target_ops
*target
= current_inferior ()->top_target ();
4078 return target
->read_btrace (btrace
, btinfo
, type
);
4083 const struct btrace_config
*
4084 target_btrace_conf (const struct btrace_target_info
*btinfo
)
4086 return current_inferior ()->top_target ()->btrace_conf (btinfo
);
4092 target_stop_recording (void)
4094 current_inferior ()->top_target ()->stop_recording ();
4100 target_save_record (const char *filename
)
4102 current_inferior ()->top_target ()->save_record (filename
);
4108 target_supports_delete_record ()
4110 return current_inferior ()->top_target ()->supports_delete_record ();
4116 target_delete_record (void)
4118 current_inferior ()->top_target ()->delete_record ();
4124 target_record_method (ptid_t ptid
)
4126 return current_inferior ()->top_target ()->record_method (ptid
);
4132 target_record_is_replaying (ptid_t ptid
)
4134 return current_inferior ()->top_target ()->record_is_replaying (ptid
);
4140 target_record_will_replay (ptid_t ptid
, int dir
)
4142 return current_inferior ()->top_target ()->record_will_replay (ptid
, dir
);
4148 target_record_stop_replaying (void)
4150 current_inferior ()->top_target ()->record_stop_replaying ();
4156 target_goto_record_begin (void)
4158 current_inferior ()->top_target ()->goto_record_begin ();
4164 target_goto_record_end (void)
4166 current_inferior ()->top_target ()->goto_record_end ();
4172 target_goto_record (ULONGEST insn
)
4174 current_inferior ()->top_target ()->goto_record (insn
);
4180 target_insn_history (int size
, gdb_disassembly_flags flags
)
4182 current_inferior ()->top_target ()->insn_history (size
, flags
);
4188 target_insn_history_from (ULONGEST from
, int size
,
4189 gdb_disassembly_flags flags
)
4191 current_inferior ()->top_target ()->insn_history_from (from
, size
, flags
);
4197 target_insn_history_range (ULONGEST begin
, ULONGEST end
,
4198 gdb_disassembly_flags flags
)
4200 current_inferior ()->top_target ()->insn_history_range (begin
, end
, flags
);
4206 target_call_history (int size
, record_print_flags flags
)
4208 current_inferior ()->top_target ()->call_history (size
, flags
);
4214 target_call_history_from (ULONGEST begin
, int size
, record_print_flags flags
)
4216 current_inferior ()->top_target ()->call_history_from (begin
, size
, flags
);
4222 target_call_history_range (ULONGEST begin
, ULONGEST end
, record_print_flags flags
)
4224 current_inferior ()->top_target ()->call_history_range (begin
, end
, flags
);
4229 const struct frame_unwind
*
4230 target_get_unwinder (void)
4232 return current_inferior ()->top_target ()->get_unwinder ();
4237 const struct frame_unwind
*
4238 target_get_tailcall_unwinder (void)
4240 return current_inferior ()->top_target ()->get_tailcall_unwinder ();
4246 target_prepare_to_generate_core (void)
4248 current_inferior ()->top_target ()->prepare_to_generate_core ();
4254 target_done_generating_core (void)
4256 current_inferior ()->top_target ()->done_generating_core ();
/* Help text shared by the "info target" and "info files" commands
   registered in _initialize_target below.  */
static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4267 default_rcmd (struct target_ops
*self
, const char *command
,
4268 struct ui_file
*output
)
4270 error (_("\"monitor\" command not supported by this target."));
4274 do_monitor_command (const char *cmd
, int from_tty
)
4276 target_rcmd (cmd
, gdb_stdtarg
);
4279 /* Erases all the memory regions marked as flash. CMD and FROM_TTY are
4283 flash_erase_command (const char *cmd
, int from_tty
)
4285 /* Used to communicate termination of flash operations to the target. */
4286 bool found_flash_region
= false;
4287 struct gdbarch
*gdbarch
= target_gdbarch ();
4289 std::vector
<mem_region
> mem_regions
= target_memory_map ();
4291 /* Iterate over all memory regions. */
4292 for (const mem_region
&m
: mem_regions
)
4294 /* Is this a flash memory region? */
4295 if (m
.attrib
.mode
== MEM_FLASH
)
4297 found_flash_region
= true;
4298 target_flash_erase (m
.lo
, m
.hi
- m
.lo
);
4300 ui_out_emit_tuple
tuple_emitter (current_uiout
, "erased-regions");
4302 current_uiout
->message (_("Erasing flash memory region at address "));
4303 current_uiout
->field_core_addr ("address", gdbarch
, m
.lo
);
4304 current_uiout
->message (", size = ");
4305 current_uiout
->field_string ("size", hex_string (m
.hi
- m
.lo
));
4306 current_uiout
->message ("\n");
4310 /* Did we do any flash operations? If so, we need to finalize them. */
4311 if (found_flash_region
)
4312 target_flash_done ();
4314 current_uiout
->message (_("No flash memory regions found.\n"));
4317 /* Print the name of each layers of our target stack. */
4320 maintenance_print_target_stack (const char *cmd
, int from_tty
)
4322 gdb_printf (_("The current target stack is:\n"));
4324 for (target_ops
*t
= current_inferior ()->top_target ();
4328 if (t
->stratum () == debug_stratum
)
4330 gdb_printf (" - %s (%s)\n", t
->shortname (), t
->longname ());
4337 target_async (bool enable
)
4339 /* If we are trying to enable async mode then it must be the case that
4340 async mode is possible for this target. */
4341 gdb_assert (!enable
|| target_can_async_p ());
4342 infrun_async (enable
);
4343 current_inferior ()->top_target ()->async (enable
);
4349 target_thread_events (int enable
)
4351 current_inferior ()->top_target ()->thread_events (enable
);
/* Controls if targets can report that they can/are async.  This is
   just for maintainers to use when debugging gdb.  */
bool target_async_permitted = true;
4359 set_maint_target_async (bool permitted
)
4361 if (have_live_inferiors ())
4362 error (_("Cannot change this setting while the inferior is running."));
4364 target_async_permitted
= permitted
;
4368 get_maint_target_async ()
4370 return target_async_permitted
;
4374 show_maint_target_async (ui_file
*file
, int from_tty
,
4375 cmd_list_element
*c
, const char *value
)
4378 _("Controlling the inferior in "
4379 "asynchronous mode is %s.\n"), value
);
4382 /* Return true if the target operates in non-stop mode even with "set
4386 target_always_non_stop_p (void)
4388 return current_inferior ()->top_target ()->always_non_stop_p ();
4394 target_is_non_stop_p ()
4397 || target_non_stop_enabled
== AUTO_BOOLEAN_TRUE
4398 || (target_non_stop_enabled
== AUTO_BOOLEAN_AUTO
4399 && target_always_non_stop_p ()))
4400 && target_can_async_p ());
4406 exists_non_stop_target ()
4408 if (target_is_non_stop_p ())
4411 scoped_restore_current_thread restore_thread
;
4413 for (inferior
*inf
: all_inferiors ())
4415 switch_to_inferior_no_thread (inf
);
4416 if (target_is_non_stop_p ())
/* Controls if targets can report that they always run in non-stop
   mode.  This is just for maintainers to use when debugging gdb.  */
enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
4427 /* Set callback for maint target-non-stop setting. */
4430 set_maint_target_non_stop (auto_boolean enabled
)
4432 if (have_live_inferiors ())
4433 error (_("Cannot change this setting while the inferior is running."));
4435 target_non_stop_enabled
= enabled
;
4438 /* Get callback for maint target-non-stop setting. */
4441 get_maint_target_non_stop ()
4443 return target_non_stop_enabled
;
4447 show_maint_target_non_stop (ui_file
*file
, int from_tty
,
4448 cmd_list_element
*c
, const char *value
)
4450 if (target_non_stop_enabled
== AUTO_BOOLEAN_AUTO
)
4452 _("Whether the target is always in non-stop mode "
4453 "is %s (currently %s).\n"), value
,
4454 target_always_non_stop_p () ? "on" : "off");
4457 _("Whether the target is always in non-stop mode "
4458 "is %s.\n"), value
);
/* Temporary copies of permission settings.  The "may-*" set commands
   edit these shadows; set_target_permissions / set_write_memory_permission
   copy them into the real flags once the change is validated.  */

static bool may_write_registers_1 = true;
static bool may_write_memory_1 = true;
static bool may_insert_breakpoints_1 = true;
static bool may_insert_tracepoints_1 = true;
static bool may_insert_fast_tracepoints_1 = true;
static bool may_stop_1 = true;
4470 /* Make the user-set values match the real values again. */
4473 update_target_permissions (void)
4475 may_write_registers_1
= may_write_registers
;
4476 may_write_memory_1
= may_write_memory
;
4477 may_insert_breakpoints_1
= may_insert_breakpoints
;
4478 may_insert_tracepoints_1
= may_insert_tracepoints
;
4479 may_insert_fast_tracepoints_1
= may_insert_fast_tracepoints
;
4480 may_stop_1
= may_stop
;
4483 /* The one function handles (most of) the permission flags in the same
4487 set_target_permissions (const char *args
, int from_tty
,
4488 struct cmd_list_element
*c
)
4490 if (target_has_execution ())
4492 update_target_permissions ();
4493 error (_("Cannot change this setting while the inferior is running."));
4496 /* Make the real values match the user-changed values. */
4497 may_write_registers
= may_write_registers_1
;
4498 may_insert_breakpoints
= may_insert_breakpoints_1
;
4499 may_insert_tracepoints
= may_insert_tracepoints_1
;
4500 may_insert_fast_tracepoints
= may_insert_fast_tracepoints_1
;
4501 may_stop
= may_stop_1
;
4502 update_observer_mode ();
4505 /* Set memory write permission independently of observer mode. */
4508 set_write_memory_permission (const char *args
, int from_tty
,
4509 struct cmd_list_element
*c
)
4511 /* Make the real values match the user-changed values. */
4512 may_write_memory
= may_write_memory_1
;
4513 update_observer_mode ();
4516 void _initialize_target ();
4519 _initialize_target ()
4521 the_debug_target
= new debug_target ();
4523 add_info ("target", info_target_command
, targ_desc
);
4524 add_info ("files", info_target_command
, targ_desc
);
4526 add_setshow_zuinteger_cmd ("target", class_maintenance
, &targetdebug
, _("\
4527 Set target debugging."), _("\
4528 Show target debugging."), _("\
4529 When non-zero, target debugging is enabled. Higher numbers are more\n\
4533 &setdebuglist
, &showdebuglist
);
4535 add_setshow_boolean_cmd ("trust-readonly-sections", class_support
,
4536 &trust_readonly
, _("\
4537 Set mode for reading from readonly sections."), _("\
4538 Show mode for reading from readonly sections."), _("\
4539 When this mode is on, memory reads from readonly sections (such as .text)\n\
4540 will be read from the object file instead of from the target. This will\n\
4541 result in significant performance improvement for remote targets."),
4543 show_trust_readonly
,
4544 &setlist
, &showlist
);
4546 add_com ("monitor", class_obscure
, do_monitor_command
,
4547 _("Send a command to the remote monitor (remote targets only)."));
4549 add_cmd ("target-stack", class_maintenance
, maintenance_print_target_stack
,
4550 _("Print the name of each layer of the internal target stack."),
4551 &maintenanceprintlist
);
4553 add_setshow_boolean_cmd ("target-async", no_class
,
4555 Set whether gdb controls the inferior in asynchronous mode."), _("\
4556 Show whether gdb controls the inferior in asynchronous mode."), _("\
4557 Tells gdb whether to control the inferior in asynchronous mode."),
4558 set_maint_target_async
,
4559 get_maint_target_async
,
4560 show_maint_target_async
,
4561 &maintenance_set_cmdlist
,
4562 &maintenance_show_cmdlist
);
4564 add_setshow_auto_boolean_cmd ("target-non-stop", no_class
,
4566 Set whether gdb always controls the inferior in non-stop mode."), _("\
4567 Show whether gdb always controls the inferior in non-stop mode."), _("\
4568 Tells gdb whether to control the inferior in non-stop mode."),
4569 set_maint_target_non_stop
,
4570 get_maint_target_non_stop
,
4571 show_maint_target_non_stop
,
4572 &maintenance_set_cmdlist
,
4573 &maintenance_show_cmdlist
);
4575 add_setshow_boolean_cmd ("may-write-registers", class_support
,
4576 &may_write_registers_1
, _("\
4577 Set permission to write into registers."), _("\
4578 Show permission to write into registers."), _("\
4579 When this permission is on, GDB may write into the target's registers.\n\
4580 Otherwise, any sort of write attempt will result in an error."),
4581 set_target_permissions
, NULL
,
4582 &setlist
, &showlist
);
4584 add_setshow_boolean_cmd ("may-write-memory", class_support
,
4585 &may_write_memory_1
, _("\
4586 Set permission to write into target memory."), _("\
4587 Show permission to write into target memory."), _("\
4588 When this permission is on, GDB may write into the target's memory.\n\
4589 Otherwise, any sort of write attempt will result in an error."),
4590 set_write_memory_permission
, NULL
,
4591 &setlist
, &showlist
);
4593 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support
,
4594 &may_insert_breakpoints_1
, _("\
4595 Set permission to insert breakpoints in the target."), _("\
4596 Show permission to insert breakpoints in the target."), _("\
4597 When this permission is on, GDB may insert breakpoints in the program.\n\
4598 Otherwise, any sort of insertion attempt will result in an error."),
4599 set_target_permissions
, NULL
,
4600 &setlist
, &showlist
);
4602 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support
,
4603 &may_insert_tracepoints_1
, _("\
4604 Set permission to insert tracepoints in the target."), _("\
4605 Show permission to insert tracepoints in the target."), _("\
4606 When this permission is on, GDB may insert tracepoints in the program.\n\
4607 Otherwise, any sort of insertion attempt will result in an error."),
4608 set_target_permissions
, NULL
,
4609 &setlist
, &showlist
);
4611 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support
,
4612 &may_insert_fast_tracepoints_1
, _("\
4613 Set permission to insert fast tracepoints in the target."), _("\
4614 Show permission to insert fast tracepoints in the target."), _("\
4615 When this permission is on, GDB may insert fast tracepoints.\n\
4616 Otherwise, any sort of insertion attempt will result in an error."),
4617 set_target_permissions
, NULL
,
4618 &setlist
, &showlist
);
4620 add_setshow_boolean_cmd ("may-interrupt", class_support
,
4622 Set permission to interrupt or signal the target."), _("\
4623 Show permission to interrupt or signal the target."), _("\
4624 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4625 Otherwise, any attempt to interrupt or stop will be ignored."),
4626 set_target_permissions
, NULL
,
4627 &setlist
, &showlist
);
4629 add_com ("flash-erase", no_class
, flash_erase_command
,
4630 _("Erase all flash memory regions."));
4632 add_setshow_boolean_cmd ("auto-connect-native-target", class_support
,
4633 &auto_connect_native_target
, _("\
4634 Set whether GDB may automatically connect to the native target."), _("\
4635 Show whether GDB may automatically connect to the native target."), _("\
4636 When on, and GDB is not connected to a target yet, GDB\n\
4637 attempts \"run\" and other commands with the native target."),
4638 NULL
, show_auto_connect_native_target
,
4639 &setlist
, &showlist
);