1 /* Select target systems and architectures at runtime for GDB.
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
5 Contributed by Cygnus Support.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
26 #include "target-dcache.h"
36 #include "gdb_assert.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
48 static void target_info (char *, int);
50 static void default_terminal_info (struct target_ops
*, const char *, int);
52 static int default_watchpoint_addr_within_range (struct target_ops
*,
53 CORE_ADDR
, CORE_ADDR
, int);
55 static int default_region_ok_for_hw_watchpoint (struct target_ops
*,
58 static void default_rcmd (struct target_ops
*, char *, struct ui_file
*);
60 static ptid_t
default_get_ada_task_ptid (struct target_ops
*self
,
63 static int default_follow_fork (struct target_ops
*self
, int follow_child
,
66 static void tcomplain (void) ATTRIBUTE_NORETURN
;
68 static int nomemory (CORE_ADDR
, char *, int, int, struct target_ops
*);
70 static int return_zero (void);
72 void target_ignore (void);
74 static void target_command (char *, int);
76 static struct target_ops
*find_default_run_target (char *);
78 static target_xfer_partial_ftype default_xfer_partial
;
80 static struct gdbarch
*default_thread_architecture (struct target_ops
*ops
,
83 static int dummy_find_memory_regions (struct target_ops
*self
,
84 find_memory_region_ftype ignore1
,
87 static char *dummy_make_corefile_notes (struct target_ops
*self
,
88 bfd
*ignore1
, int *ignore2
);
90 static int find_default_can_async_p (struct target_ops
*ignore
);
92 static int find_default_is_async_p (struct target_ops
*ignore
);
94 static enum exec_direction_kind default_execution_direction
95 (struct target_ops
*self
);
97 #include "target-delegates.c"
99 static void init_dummy_target (void);
101 static struct target_ops debug_target
;
103 static void debug_to_open (char *, int);
105 static void debug_to_prepare_to_store (struct target_ops
*self
,
108 static void debug_to_files_info (struct target_ops
*);
110 static int debug_to_insert_breakpoint (struct target_ops
*, struct gdbarch
*,
111 struct bp_target_info
*);
113 static int debug_to_remove_breakpoint (struct target_ops
*, struct gdbarch
*,
114 struct bp_target_info
*);
116 static int debug_to_can_use_hw_breakpoint (struct target_ops
*self
,
119 static int debug_to_insert_hw_breakpoint (struct target_ops
*self
,
121 struct bp_target_info
*);
123 static int debug_to_remove_hw_breakpoint (struct target_ops
*self
,
125 struct bp_target_info
*);
127 static int debug_to_insert_watchpoint (struct target_ops
*self
,
129 struct expression
*);
131 static int debug_to_remove_watchpoint (struct target_ops
*self
,
133 struct expression
*);
135 static int debug_to_stopped_data_address (struct target_ops
*, CORE_ADDR
*);
137 static int debug_to_watchpoint_addr_within_range (struct target_ops
*,
138 CORE_ADDR
, CORE_ADDR
, int);
140 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops
*self
,
143 static int debug_to_can_accel_watchpoint_condition (struct target_ops
*self
,
145 struct expression
*);
147 static void debug_to_terminal_init (struct target_ops
*self
);
149 static void debug_to_terminal_inferior (struct target_ops
*self
);
151 static void debug_to_terminal_ours_for_output (struct target_ops
*self
);
153 static void debug_to_terminal_save_ours (struct target_ops
*self
);
155 static void debug_to_terminal_ours (struct target_ops
*self
);
157 static void debug_to_load (struct target_ops
*self
, char *, int);
159 static int debug_to_can_run (struct target_ops
*self
);
161 static void debug_to_stop (struct target_ops
*self
, ptid_t
);
163 /* Pointer to array of target architecture structures; the size of the
164 array; the current index into the array; the allocated size of the
166 struct target_ops
**target_structs
;
167 unsigned target_struct_size
;
168 unsigned target_struct_allocsize
;
169 #define DEFAULT_ALLOCSIZE 10
171 /* The initial current target, so that there is always a semi-valid
174 static struct target_ops dummy_target
;
176 /* Top of target stack. */
178 static struct target_ops
*target_stack
;
180 /* The target structure we are currently using to talk to a process
181 or file or whatever "inferior" we have. */
183 struct target_ops current_target
;
185 /* Command list for target. */
187 static struct cmd_list_element
*targetlist
= NULL
;
189 /* Nonzero if we should trust readonly sections from the
190 executable when reading memory. */
192 static int trust_readonly
= 0;
194 /* Nonzero if we should show true memory content including
195 memory breakpoint inserted by gdb. */
197 static int show_memory_breakpoints
= 0;
199 /* These globals control whether GDB attempts to perform these
200 operations; they are useful for targets that need to prevent
201 inadvertant disruption, such as in non-stop mode. */
203 int may_write_registers
= 1;
205 int may_write_memory
= 1;
207 int may_insert_breakpoints
= 1;
209 int may_insert_tracepoints
= 1;
211 int may_insert_fast_tracepoints
= 1;
215 /* Non-zero if we want to see trace of target level stuff. */
217 static unsigned int targetdebug
= 0;
/* "show debug target" callback: print the current value of the
   "targetdebug" setting to FILE.  NOTE(review): this span is a lossy
   extraction -- the original return type line, braces, and surrounding
   blank lines are missing; code text below is preserved byte-for-byte.  */
219 show_targetdebug (struct ui_file
*file
, int from_tty
,
220 struct cmd_list_element
*c
, const char *value
)
222 fprintf_filtered (file
, _("Target debugging is %s.\n"), value
);
225 static void setup_target_debug (void);
227 /* The user just typed 'target' without the name of a target. */
/* Handler for a bare "target" command (no target name given): print a
   usage hint.  NOTE(review): fragment -- the rest of the fputs_filtered
   call and the function braces were dropped by the extraction.  */
230 target_command (char *arg
, int from_tty
)
232 fputs_filtered ("Argument required (target name). Try `help target'\n",
236 /* Default target_has_* methods for process_stratum targets. */
239 default_child_has_all_memory (struct target_ops
*ops
)
241 /* If no inferior selected, then we can't read memory here. */
242 if (ptid_equal (inferior_ptid
, null_ptid
))
249 default_child_has_memory (struct target_ops
*ops
)
251 /* If no inferior selected, then we can't read memory here. */
252 if (ptid_equal (inferior_ptid
, null_ptid
))
259 default_child_has_stack (struct target_ops
*ops
)
261 /* If no inferior selected, there's no stack. */
262 if (ptid_equal (inferior_ptid
, null_ptid
))
269 default_child_has_registers (struct target_ops
*ops
)
271 /* Can't read registers from no inferior. */
272 if (ptid_equal (inferior_ptid
, null_ptid
))
279 default_child_has_execution (struct target_ops
*ops
, ptid_t the_ptid
)
281 /* If there's no thread selected, then we can't make it run through
283 if (ptid_equal (the_ptid
, null_ptid
))
291 target_has_all_memory_1 (void)
293 struct target_ops
*t
;
295 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
296 if (t
->to_has_all_memory (t
))
303 target_has_memory_1 (void)
305 struct target_ops
*t
;
307 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
308 if (t
->to_has_memory (t
))
315 target_has_stack_1 (void)
317 struct target_ops
*t
;
319 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
320 if (t
->to_has_stack (t
))
327 target_has_registers_1 (void)
329 struct target_ops
*t
;
331 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
332 if (t
->to_has_registers (t
))
339 target_has_execution_1 (ptid_t the_ptid
)
341 struct target_ops
*t
;
343 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
344 if (t
->to_has_execution (t
, the_ptid
))
351 target_has_execution_current (void)
353 return target_has_execution_1 (inferior_ptid
);
356 /* Complete initialization of T. This ensures that various fields in
357 T are set, if needed by the target implementation. */
360 complete_target_initialization (struct target_ops
*t
)
362 /* Provide default values for all "must have" methods. */
363 if (t
->to_xfer_partial
== NULL
)
364 t
->to_xfer_partial
= default_xfer_partial
;
366 if (t
->to_has_all_memory
== NULL
)
367 t
->to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
369 if (t
->to_has_memory
== NULL
)
370 t
->to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
372 if (t
->to_has_stack
== NULL
)
373 t
->to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
375 if (t
->to_has_registers
== NULL
)
376 t
->to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
378 if (t
->to_has_execution
== NULL
)
379 t
->to_has_execution
= (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
381 install_delegators (t
);
384 /* Add possible target architecture T to the list and add a new
385 command 'target T->to_shortname'. Set COMPLETER as the command's
386 completer if not NULL. */
389 add_target_with_completer (struct target_ops
*t
,
390 completer_ftype
*completer
)
392 struct cmd_list_element
*c
;
394 complete_target_initialization (t
);
398 target_struct_allocsize
= DEFAULT_ALLOCSIZE
;
399 target_structs
= (struct target_ops
**) xmalloc
400 (target_struct_allocsize
* sizeof (*target_structs
));
402 if (target_struct_size
>= target_struct_allocsize
)
404 target_struct_allocsize
*= 2;
405 target_structs
= (struct target_ops
**)
406 xrealloc ((char *) target_structs
,
407 target_struct_allocsize
* sizeof (*target_structs
));
409 target_structs
[target_struct_size
++] = t
;
411 if (targetlist
== NULL
)
412 add_prefix_cmd ("target", class_run
, target_command
, _("\
413 Connect to a target machine or process.\n\
414 The first argument is the type or protocol of the target machine.\n\
415 Remaining arguments are interpreted by the target protocol. For more\n\
416 information on the arguments for a particular protocol, type\n\
417 `help target ' followed by the protocol name."),
418 &targetlist
, "target ", 0, &cmdlist
);
419 c
= add_cmd (t
->to_shortname
, no_class
, t
->to_open
, t
->to_doc
,
421 if (completer
!= NULL
)
422 set_cmd_completer (c
, completer
);
425 /* Add a possible target architecture to the list. */
428 add_target (struct target_ops
*t
)
430 add_target_with_completer (t
, NULL
);
436 add_deprecated_target_alias (struct target_ops
*t
, char *alias
)
438 struct cmd_list_element
*c
;
441 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
443 c
= add_cmd (alias
, no_class
, t
->to_open
, t
->to_doc
, &targetlist
);
444 alt
= xstrprintf ("target %s", t
->to_shortname
);
445 deprecate_cmd (c
, alt
);
459 fprintf_unfiltered (gdb_stdlog
, "target_kill ()\n");
461 current_target
.to_kill (¤t_target
);
/* Invalidate the target dcache, then delegate the load to the current
   target's to_load method.  NOTE(review): the token "¤t_target" on
   the third statement line is mojibake -- presumably "&current_target"
   mangled through an HTML-entity round-trip (&curren; -> U+00A4); left
   byte-identical here, verify against upstream before compiling.  */
465 target_load (char *arg
, int from_tty
)
467 target_dcache_invalidate ();
468 (*current_target
.to_load
) (&current_target
, arg
, from_tty
);
472 target_create_inferior (char *exec_file
, char *args
,
473 char **env
, int from_tty
)
475 struct target_ops
*t
;
477 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
479 if (t
->to_create_inferior
!= NULL
)
481 t
->to_create_inferior (t
, exec_file
, args
, env
, from_tty
);
483 fprintf_unfiltered (gdb_stdlog
,
484 "target_create_inferior (%s, %s, xxx, %d)\n",
485 exec_file
, args
, from_tty
);
490 internal_error (__FILE__
, __LINE__
,
491 _("could not find a target to create inferior"));
495 target_terminal_inferior (void)
497 /* A background resume (``run&'') should leave GDB in control of the
498 terminal. Use target_can_async_p, not target_is_async_p, since at
499 this point the target is not async yet. However, if sync_execution
500 is not set, we know it will become async prior to resume. */
501 if (target_can_async_p () && !sync_execution
)
504 /* If GDB is resuming the inferior in the foreground, install
505 inferior's terminal modes. */
506 (*current_target
.to_terminal_inferior
) (¤t_target
);
/* Default memory-transfer method for targets with no memory access:
   set errno to EIO and report zero bytes handled.  NOTE(review):
   opening/closing braces of the body were dropped by the extraction;
   code text is otherwise byte-identical to the fragment.  */
510 nomemory (CORE_ADDR memaddr
, char *myaddr
, int len
, int write
,
511 struct target_ops
*t
)
513 errno
= EIO
; /* Can't read/write this location. */
514 return 0; /* No bytes handled. */
520 error (_("You can't do that when your target is `%s'"),
521 current_target
.to_shortname
);
527 error (_("You can't do that without a process to debug."));
/* Default to_terminal_info method: there is nothing saved, so just say
   so.  ARGS and FROM_TTY are ignored.  NOTE(review): return-type line
   and braces are missing from this extraction fragment.  */
531 default_terminal_info (struct target_ops
*self
, const char *args
, int from_tty
)
533 printf_unfiltered (_("No saved terminal information.\n"));
536 /* A default implementation for the to_get_ada_task_ptid target method.
538 This function builds the PTID by using both LWP and TID as part of
539 the PTID lwp and tid elements. The pid used is the pid of the
   inferior_ptid -- TODO confirm; the tail of this comment (and its
   closing delimiter) was truncated by the extraction.  */
543 default_get_ada_task_ptid (struct target_ops
*self
, long lwp
, long tid
)
545 return ptid_build (ptid_get_pid (inferior_ptid
), lwp
, tid
)
;
/* Default to_execution_direction method.  NOTE(review): fragment --
   the branch bodies (presumably returning EXEC_FORWARD / EXEC_REVERSE)
   and the braces were dropped by the extraction; only the conditions
   and the unreachable-case assertion survive.  */
548 static enum exec_direction_kind
549 default_execution_direction (struct target_ops
*self
)
/* Reverse execution unsupported -> must be the forward direction.  */
551 if (!target_can_execute_reverse
)
553 else if (!target_can_async_p ())
556 gdb_assert_not_reached ("\
557 to_execution_direction must be implemented for reverse async");
560 /* Go through the target stack from top to bottom, copying over zero
561 entries in current_target, then filling in still empty entries. In
562 effect, we are doing class inheritance through the pushed target
565 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
566 is currently implemented, is that it discards any knowledge of
567 which target an inherited method originally belonged to.
568 Consequently, new new target methods should instead explicitly and
569 locally search the target stack for the target that can handle the
573 update_current_target (void)
575 struct target_ops
*t
;
577 /* First, reset current's contents. */
578 memset (¤t_target
, 0, sizeof (current_target
));
580 /* Install the delegators. */
581 install_delegators (¤t_target
);
583 #define INHERIT(FIELD, TARGET) \
584 if (!current_target.FIELD) \
585 current_target.FIELD = (TARGET)->FIELD
587 for (t
= target_stack
; t
; t
= t
->beneath
)
589 INHERIT (to_shortname
, t
);
590 INHERIT (to_longname
, t
);
592 /* Do not inherit to_open. */
593 /* Do not inherit to_close. */
594 /* Do not inherit to_attach. */
595 /* Do not inherit to_post_attach. */
596 INHERIT (to_attach_no_wait
, t
);
597 /* Do not inherit to_detach. */
598 /* Do not inherit to_disconnect. */
599 /* Do not inherit to_resume. */
600 /* Do not inherit to_wait. */
601 /* Do not inherit to_fetch_registers. */
602 /* Do not inherit to_store_registers. */
603 /* Do not inherit to_prepare_to_store. */
604 INHERIT (deprecated_xfer_memory
, t
);
605 /* Do not inherit to_files_info. */
606 /* Do not inherit to_insert_breakpoint. */
607 /* Do not inherit to_remove_breakpoint. */
608 /* Do not inherit to_can_use_hw_breakpoint. */
609 /* Do not inherit to_insert_hw_breakpoint. */
610 /* Do not inherit to_remove_hw_breakpoint. */
611 /* Do not inherit to_ranged_break_num_registers. */
612 /* Do not inherit to_insert_watchpoint. */
613 /* Do not inherit to_remove_watchpoint. */
614 /* Do not inherit to_insert_mask_watchpoint. */
615 /* Do not inherit to_remove_mask_watchpoint. */
616 /* Do not inherit to_stopped_data_address. */
617 INHERIT (to_have_steppable_watchpoint
, t
);
618 INHERIT (to_have_continuable_watchpoint
, t
);
619 /* Do not inherit to_stopped_by_watchpoint. */
620 /* Do not inherit to_watchpoint_addr_within_range. */
621 /* Do not inherit to_region_ok_for_hw_watchpoint. */
622 /* Do not inherit to_can_accel_watchpoint_condition. */
623 /* Do not inherit to_masked_watch_num_registers. */
624 /* Do not inherit to_terminal_init. */
625 /* Do not inherit to_terminal_inferior. */
626 /* Do not inherit to_terminal_ours_for_output. */
627 /* Do not inherit to_terminal_ours. */
628 /* Do not inherit to_terminal_save_ours. */
629 /* Do not inherit to_terminal_info. */
630 /* Do not inherit to_kill. */
631 /* Do not inherit to_load. */
632 /* Do no inherit to_create_inferior. */
633 /* Do not inherit to_post_startup_inferior. */
634 /* Do not inherit to_insert_fork_catchpoint. */
635 /* Do not inherit to_remove_fork_catchpoint. */
636 /* Do not inherit to_insert_vfork_catchpoint. */
637 /* Do not inherit to_remove_vfork_catchpoint. */
638 /* Do not inherit to_follow_fork. */
639 /* Do not inherit to_insert_exec_catchpoint. */
640 /* Do not inherit to_remove_exec_catchpoint. */
641 /* Do not inherit to_set_syscall_catchpoint. */
642 /* Do not inherit to_has_exited. */
643 /* Do not inherit to_mourn_inferior. */
644 INHERIT (to_can_run
, t
);
645 /* Do not inherit to_pass_signals. */
646 /* Do not inherit to_program_signals. */
647 /* Do not inherit to_thread_alive. */
648 /* Do not inherit to_find_new_threads. */
649 /* Do not inherit to_pid_to_str. */
650 /* Do not inherit to_extra_thread_info. */
651 /* Do not inherit to_thread_name. */
652 /* Do not inherit to_stop. */
653 /* Do not inherit to_xfer_partial. */
654 /* Do not inherit to_rcmd. */
655 /* Do not inherit to_pid_to_exec_file. */
656 /* Do not inherit to_log_command. */
657 INHERIT (to_stratum
, t
);
658 /* Do not inherit to_has_all_memory. */
659 /* Do not inherit to_has_memory. */
660 /* Do not inherit to_has_stack. */
661 /* Do not inherit to_has_registers. */
662 /* Do not inherit to_has_execution. */
663 INHERIT (to_has_thread_control
, t
);
664 /* Do not inherit to_can_async_p. */
665 /* Do not inherit to_is_async_p. */
666 /* Do not inherit to_async. */
667 /* Do not inherit to_find_memory_regions. */
668 /* Do not inherit to_make_corefile_notes. */
669 /* Do not inherit to_get_bookmark. */
670 /* Do not inherit to_goto_bookmark. */
671 /* Do not inherit to_get_thread_local_address. */
672 /* Do not inherit to_can_execute_reverse. */
673 /* Do not inherit to_execution_direction. */
674 /* Do not inherit to_thread_architecture. */
675 /* Do not inherit to_read_description. */
676 /* Do not inherit to_get_ada_task_ptid. */
677 /* Do not inherit to_search_memory. */
678 /* Do not inherit to_supports_multi_process. */
679 /* Do not inherit to_supports_enable_disable_tracepoint. */
680 /* Do not inherit to_supports_string_tracing. */
681 /* Do not inherit to_trace_init. */
682 /* Do not inherit to_download_tracepoint. */
683 /* Do not inherit to_can_download_tracepoint. */
684 /* Do not inherit to_download_trace_state_variable. */
685 /* Do not inherit to_enable_tracepoint. */
686 /* Do not inherit to_disable_tracepoint. */
687 /* Do not inherit to_trace_set_readonly_regions. */
688 /* Do not inherit to_trace_start. */
689 /* Do not inherit to_get_trace_status. */
690 /* Do not inherit to_get_tracepoint_status. */
691 /* Do not inherit to_trace_stop. */
692 /* Do not inherit to_trace_find. */
693 /* Do not inherit to_get_trace_state_variable_value. */
694 /* Do not inherit to_save_trace_data. */
695 /* Do not inherit to_upload_tracepoints. */
696 /* Do not inherit to_upload_trace_state_variables. */
697 /* Do not inherit to_get_raw_trace_data. */
698 /* Do not inherit to_get_min_fast_tracepoint_insn_len. */
699 /* Do not inherit to_set_disconnected_tracing. */
700 /* Do not inherit to_set_circular_trace_buffer. */
701 /* Do not inherit to_set_trace_buffer_size. */
702 /* Do not inherit to_set_trace_notes. */
703 /* Do not inherit to_get_tib_address. */
704 /* Do not inherit to_set_permissions. */
705 /* Do not inherit to_static_tracepoint_marker_at. */
706 /* Do not inherit to_static_tracepoint_markers_by_strid. */
707 /* Do not inherit to_traceframe_info. */
708 /* Do not inherit to_use_agent. */
709 /* Do not inherit to_can_use_agent. */
710 /* Do not inherit to_augmented_libraries_svr4_read. */
711 INHERIT (to_magic
, t
);
713 to_supports_evaluation_of_breakpoint_conditions. */
714 /* Do not inherit to_can_run_breakpoint_commands. */
715 /* Do not inherit to_memory_map. */
716 /* Do not inherit to_flash_erase. */
717 /* Do not inherit to_flash_done. */
721 /* Clean up a target struct so it no longer has any zero pointers in
722 it. Some entries are defaulted to a method that print an error,
723 others are hard-wired to a standard recursive default. */
725 #define de_fault(field, value) \
726 if (!current_target.field) \
727 current_target.field = value
730 (void (*) (char *, int))
733 (void (*) (struct target_ops
*))
735 de_fault (deprecated_xfer_memory
,
736 (int (*) (CORE_ADDR
, gdb_byte
*, int, int,
737 struct mem_attrib
*, struct target_ops
*))
739 de_fault (to_can_run
,
740 (int (*) (struct target_ops
*))
742 current_target
.to_read_description
= NULL
;
746 /* Finally, position the target-stack beneath the squashed
747 "current_target". That way code looking for a non-inherited
748 target method can quickly and simply find it. */
749 current_target
.beneath
= target_stack
;
752 setup_target_debug ();
755 /* Push a new target type into the stack of the existing target accessors,
756 possibly superseding some of the existing accessors.
758 Rather than allow an empty stack, we always have the dummy target at
759 the bottom stratum, so we can call the function vectors without
763 push_target (struct target_ops
*t
)
765 struct target_ops
**cur
;
767 /* Check magic number. If wrong, it probably means someone changed
768 the struct definition, but not all the places that initialize one. */
769 if (t
->to_magic
!= OPS_MAGIC
)
771 fprintf_unfiltered (gdb_stderr
,
772 "Magic number of %s target struct wrong\n",
774 internal_error (__FILE__
, __LINE__
,
775 _("failed internal consistency check"));
778 /* Find the proper stratum to install this target in. */
779 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
781 if ((int) (t
->to_stratum
) >= (int) (*cur
)->to_stratum
)
785 /* If there's already targets at this stratum, remove them. */
786 /* FIXME: cagney/2003-10-15: I think this should be popping all
787 targets to CUR, and not just those at this stratum level. */
788 while ((*cur
) != NULL
&& t
->to_stratum
== (*cur
)->to_stratum
)
790 /* There's already something at this stratum level. Close it,
791 and un-hook it from the stack. */
792 struct target_ops
*tmp
= (*cur
);
794 (*cur
) = (*cur
)->beneath
;
799 /* We have removed all targets in our stratum, now add the new one. */
803 update_current_target ();
806 /* Remove a target_ops vector from the stack, wherever it may be.
807 Return how many times it was removed (0 or 1). */
810 unpush_target (struct target_ops
*t
)
812 struct target_ops
**cur
;
813 struct target_ops
*tmp
;
815 if (t
->to_stratum
== dummy_stratum
)
816 internal_error (__FILE__
, __LINE__
,
817 _("Attempt to unpush the dummy target"));
819 /* Look for the specified target. Note that we assume that a target
820 can only occur once in the target stack. */
822 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
828 /* If we don't find target_ops, quit. Only open targets should be
833 /* Unchain the target. */
835 (*cur
) = (*cur
)->beneath
;
838 update_current_target ();
840 /* Finally close the target. Note we do this after unchaining, so
841 any target method calls from within the target_close
842 implementation don't end up in T anymore. */
849 pop_all_targets_above (enum strata above_stratum
)
851 while ((int) (current_target
.to_stratum
) > (int) above_stratum
)
853 if (!unpush_target (target_stack
))
855 fprintf_unfiltered (gdb_stderr
,
856 "pop_all_targets couldn't find target %s\n",
857 target_stack
->to_shortname
);
858 internal_error (__FILE__
, __LINE__
,
859 _("failed internal consistency check"));
866 pop_all_targets (void)
868 pop_all_targets_above (dummy_stratum
);
871 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
874 target_is_pushed (struct target_ops
*t
)
876 struct target_ops
**cur
;
878 /* Check magic number. If wrong, it probably means someone changed
879 the struct definition, but not all the places that initialize one. */
880 if (t
->to_magic
!= OPS_MAGIC
)
882 fprintf_unfiltered (gdb_stderr
,
883 "Magic number of %s target struct wrong\n",
885 internal_error (__FILE__
, __LINE__
,
886 _("failed internal consistency check"));
889 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
896 /* Using the objfile specified in OBJFILE, find the address for the
897 current thread's thread-local storage with offset OFFSET. */
899 target_translate_tls_address (struct objfile
*objfile
, CORE_ADDR offset
)
901 volatile CORE_ADDR addr
= 0;
902 struct target_ops
*target
;
904 for (target
= current_target
.beneath
;
906 target
= target
->beneath
)
908 if (target
->to_get_thread_local_address
!= NULL
)
913 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
915 ptid_t ptid
= inferior_ptid
;
916 volatile struct gdb_exception ex
;
918 TRY_CATCH (ex
, RETURN_MASK_ALL
)
922 /* Fetch the load module address for this objfile. */
923 lm_addr
= gdbarch_fetch_tls_load_module_address (target_gdbarch (),
925 /* If it's 0, throw the appropriate exception. */
927 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR
,
928 _("TLS load module not found"));
930 addr
= target
->to_get_thread_local_address (target
, ptid
,
933 /* If an error occurred, print TLS related messages here. Otherwise,
934 throw the error to some higher catcher. */
937 int objfile_is_library
= (objfile
->flags
& OBJF_SHARED
);
941 case TLS_NO_LIBRARY_SUPPORT_ERROR
:
942 error (_("Cannot find thread-local variables "
943 "in this thread library."));
945 case TLS_LOAD_MODULE_NOT_FOUND_ERROR
:
946 if (objfile_is_library
)
947 error (_("Cannot find shared library `%s' in dynamic"
948 " linker's load module list"), objfile_name (objfile
));
950 error (_("Cannot find executable file `%s' in dynamic"
951 " linker's load module list"), objfile_name (objfile
));
953 case TLS_NOT_ALLOCATED_YET_ERROR
:
954 if (objfile_is_library
)
955 error (_("The inferior has not yet allocated storage for"
956 " thread-local variables in\n"
957 "the shared library `%s'\n"
959 objfile_name (objfile
), target_pid_to_str (ptid
));
961 error (_("The inferior has not yet allocated storage for"
962 " thread-local variables in\n"
963 "the executable `%s'\n"
965 objfile_name (objfile
), target_pid_to_str (ptid
));
967 case TLS_GENERIC_ERROR
:
968 if (objfile_is_library
)
969 error (_("Cannot find thread-local storage for %s, "
970 "shared library %s:\n%s"),
971 target_pid_to_str (ptid
),
972 objfile_name (objfile
), ex
.message
);
974 error (_("Cannot find thread-local storage for %s, "
975 "executable file %s:\n%s"),
976 target_pid_to_str (ptid
),
977 objfile_name (objfile
), ex
.message
);
980 throw_exception (ex
);
985 /* It wouldn't be wrong here to try a gdbarch method, too; finding
986 TLS is an ABI-specific thing. But we don't do that yet. */
988 error (_("Cannot find thread-local variables on this target"));
994 target_xfer_status_to_string (enum target_xfer_status err
)
996 #define CASE(X) case X: return #X
999 CASE(TARGET_XFER_E_IO
);
1000 CASE(TARGET_XFER_E_UNAVAILABLE
);
1009 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1011 /* target_read_string -- read a null terminated string, up to LEN bytes,
1012 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1013 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1014 is responsible for freeing it. Return the number of bytes successfully
1018 target_read_string (CORE_ADDR memaddr
, char **string
, int len
, int *errnop
)
1020 int tlen
, offset
, i
;
1024 int buffer_allocated
;
1026 unsigned int nbytes_read
= 0;
1028 gdb_assert (string
);
1030 /* Small for testing. */
1031 buffer_allocated
= 4;
1032 buffer
= xmalloc (buffer_allocated
);
1037 tlen
= MIN (len
, 4 - (memaddr
& 3));
1038 offset
= memaddr
& 3;
1040 errcode
= target_read_memory (memaddr
& ~3, buf
, sizeof buf
);
1043 /* The transfer request might have crossed the boundary to an
1044 unallocated region of memory. Retry the transfer, requesting
1048 errcode
= target_read_memory (memaddr
, buf
, 1);
1053 if (bufptr
- buffer
+ tlen
> buffer_allocated
)
1057 bytes
= bufptr
- buffer
;
1058 buffer_allocated
*= 2;
1059 buffer
= xrealloc (buffer
, buffer_allocated
);
1060 bufptr
= buffer
+ bytes
;
1063 for (i
= 0; i
< tlen
; i
++)
1065 *bufptr
++ = buf
[i
+ offset
];
1066 if (buf
[i
+ offset
] == '\000')
1068 nbytes_read
+= i
+ 1;
1075 nbytes_read
+= tlen
;
1084 struct target_section_table
*
1085 target_get_section_table (struct target_ops
*target
)
1087 struct target_ops
*t
;
1090 fprintf_unfiltered (gdb_stdlog
, "target_get_section_table ()\n");
1092 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
1093 if (t
->to_get_section_table
!= NULL
)
1094 return (*t
->to_get_section_table
) (t
);
1099 /* Find a section containing ADDR. */
1101 struct target_section
*
1102 target_section_by_addr (struct target_ops
*target
, CORE_ADDR addr
)
1104 struct target_section_table
*table
= target_get_section_table (target
);
1105 struct target_section
*secp
;
1110 for (secp
= table
->sections
; secp
< table
->sections_end
; secp
++)
1112 if (addr
>= secp
->addr
&& addr
< secp
->endaddr
)
1118 /* Read memory from the live target, even if currently inspecting a
1119 traceframe. The return is the same as that of target_read. */
1121 static enum target_xfer_status
1122 target_read_live_memory (enum target_object object
,
1123 ULONGEST memaddr
, gdb_byte
*myaddr
, ULONGEST len
,
1124 ULONGEST
*xfered_len
)
1126 enum target_xfer_status ret
;
1127 struct cleanup
*cleanup
;
1129 /* Switch momentarily out of tfind mode so to access live memory.
1130 Note that this must not clear global state, such as the frame
1131 cache, which must still remain valid for the previous traceframe.
1132 We may be _building_ the frame cache at this point. */
1133 cleanup
= make_cleanup_restore_traceframe_number ();
1134 set_traceframe_number (-1);
1136 ret
= target_xfer_partial (current_target
.beneath
, object
, NULL
,
1137 myaddr
, NULL
, memaddr
, len
, xfered_len
);
1139 do_cleanups (cleanup
);
1143 /* Using the set of read-only target sections of OPS, read live
1144 read-only memory. Note that the actual reads start from the
1145 top-most target again.
1147 For interface/parameters/return description see target.h,
1150 static enum target_xfer_status
1151 memory_xfer_live_readonly_partial (struct target_ops
*ops
,
1152 enum target_object object
,
1153 gdb_byte
*readbuf
, ULONGEST memaddr
,
1154 ULONGEST len
, ULONGEST
*xfered_len
)
1156 struct target_section
*secp
;
1157 struct target_section_table
*table
;
1159 secp
= target_section_by_addr (ops
, memaddr
);
1161 && (bfd_get_section_flags (secp
->the_bfd_section
->owner
,
1162 secp
->the_bfd_section
)
1165 struct target_section
*p
;
1166 ULONGEST memend
= memaddr
+ len
;
1168 table
= target_get_section_table (ops
);
1170 for (p
= table
->sections
; p
< table
->sections_end
; p
++)
1172 if (memaddr
>= p
->addr
)
1174 if (memend
<= p
->endaddr
)
1176 /* Entire transfer is within this section. */
1177 return target_read_live_memory (object
, memaddr
,
1178 readbuf
, len
, xfered_len
);
1180 else if (memaddr
>= p
->endaddr
)
1182 /* This section ends before the transfer starts. */
1187 /* This section overlaps the transfer. Just do half. */
1188 len
= p
->endaddr
- memaddr
;
1189 return target_read_live_memory (object
, memaddr
,
1190 readbuf
, len
, xfered_len
);
1196 return TARGET_XFER_EOF
;
1199 /* Read memory from more than one valid target. A core file, for
1200 instance, could have some of memory but delegate other bits to
1201 the target below it. So, we must manually try all targets. */
1203 static enum target_xfer_status
1204 raw_memory_xfer_partial (struct target_ops
*ops
, gdb_byte
*readbuf
,
1205 const gdb_byte
*writebuf
, ULONGEST memaddr
, LONGEST len
,
1206 ULONGEST
*xfered_len
)
1208 enum target_xfer_status res
;
1212 res
= ops
->to_xfer_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
1213 readbuf
, writebuf
, memaddr
, len
,
1215 if (res
== TARGET_XFER_OK
)
1218 /* Stop if the target reports that the memory is not available. */
1219 if (res
== TARGET_XFER_E_UNAVAILABLE
)
1222 /* We want to continue past core files to executables, but not
1223 past a running target's memory. */
1224 if (ops
->to_has_all_memory (ops
))
1229 while (ops
!= NULL
);
1234 /* Perform a partial memory transfer.
1235 For docs see target.h, to_xfer_partial. */
1237 static enum target_xfer_status
1238 memory_xfer_partial_1 (struct target_ops
*ops
, enum target_object object
,
1239 gdb_byte
*readbuf
, const gdb_byte
*writebuf
, ULONGEST memaddr
,
1240 ULONGEST len
, ULONGEST
*xfered_len
)
1242 enum target_xfer_status res
;
1244 struct mem_region
*region
;
1245 struct inferior
*inf
;
1247 /* For accesses to unmapped overlay sections, read directly from
1248 files. Must do this first, as MEMADDR may need adjustment. */
1249 if (readbuf
!= NULL
&& overlay_debugging
)
1251 struct obj_section
*section
= find_pc_overlay (memaddr
);
1253 if (pc_in_unmapped_range (memaddr
, section
))
1255 struct target_section_table
*table
1256 = target_get_section_table (ops
);
1257 const char *section_name
= section
->the_bfd_section
->name
;
1259 memaddr
= overlay_mapped_address (memaddr
, section
);
1260 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1261 memaddr
, len
, xfered_len
,
1263 table
->sections_end
,
1268 /* Try the executable files, if "trust-readonly-sections" is set. */
1269 if (readbuf
!= NULL
&& trust_readonly
)
1271 struct target_section
*secp
;
1272 struct target_section_table
*table
;
1274 secp
= target_section_by_addr (ops
, memaddr
);
1276 && (bfd_get_section_flags (secp
->the_bfd_section
->owner
,
1277 secp
->the_bfd_section
)
1280 table
= target_get_section_table (ops
);
1281 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1282 memaddr
, len
, xfered_len
,
1284 table
->sections_end
,
1289 /* If reading unavailable memory in the context of traceframes, and
1290 this address falls within a read-only section, fallback to
1291 reading from live memory. */
1292 if (readbuf
!= NULL
&& get_traceframe_number () != -1)
1294 VEC(mem_range_s
) *available
;
1296 /* If we fail to get the set of available memory, then the
1297 target does not support querying traceframe info, and so we
1298 attempt reading from the traceframe anyway (assuming the
1299 target implements the old QTro packet then). */
1300 if (traceframe_available_memory (&available
, memaddr
, len
))
1302 struct cleanup
*old_chain
;
1304 old_chain
= make_cleanup (VEC_cleanup(mem_range_s
), &available
);
1306 if (VEC_empty (mem_range_s
, available
)
1307 || VEC_index (mem_range_s
, available
, 0)->start
!= memaddr
)
1309 /* Don't read into the traceframe's available
1311 if (!VEC_empty (mem_range_s
, available
))
1313 LONGEST oldlen
= len
;
1315 len
= VEC_index (mem_range_s
, available
, 0)->start
- memaddr
;
1316 gdb_assert (len
<= oldlen
);
1319 do_cleanups (old_chain
);
1321 /* This goes through the topmost target again. */
1322 res
= memory_xfer_live_readonly_partial (ops
, object
,
1325 if (res
== TARGET_XFER_OK
)
1326 return TARGET_XFER_OK
;
1329 /* No use trying further, we know some memory starting
1330 at MEMADDR isn't available. */
1332 return TARGET_XFER_E_UNAVAILABLE
;
1336 /* Don't try to read more than how much is available, in
1337 case the target implements the deprecated QTro packet to
1338 cater for older GDBs (the target's knowledge of read-only
1339 sections may be outdated by now). */
1340 len
= VEC_index (mem_range_s
, available
, 0)->length
;
1342 do_cleanups (old_chain
);
1346 /* Try GDB's internal data cache. */
1347 region
= lookup_mem_region (memaddr
);
1348 /* region->hi == 0 means there's no upper bound. */
1349 if (memaddr
+ len
< region
->hi
|| region
->hi
== 0)
1352 reg_len
= region
->hi
- memaddr
;
1354 switch (region
->attrib
.mode
)
1357 if (writebuf
!= NULL
)
1358 return TARGET_XFER_E_IO
;
1362 if (readbuf
!= NULL
)
1363 return TARGET_XFER_E_IO
;
1367 /* We only support writing to flash during "load" for now. */
1368 if (writebuf
!= NULL
)
1369 error (_("Writing to flash memory forbidden in this context"));
1373 return TARGET_XFER_E_IO
;
1376 if (!ptid_equal (inferior_ptid
, null_ptid
))
1377 inf
= find_inferior_pid (ptid_get_pid (inferior_ptid
));
1382 /* The dcache reads whole cache lines; that doesn't play well
1383 with reading from a trace buffer, because reading outside of
1384 the collected memory range fails. */
1385 && get_traceframe_number () == -1
1386 && (region
->attrib
.cache
1387 || (stack_cache_enabled_p () && object
== TARGET_OBJECT_STACK_MEMORY
)
1388 || (code_cache_enabled_p () && object
== TARGET_OBJECT_CODE_MEMORY
)))
1390 DCACHE
*dcache
= target_dcache_get_or_init ();
1393 if (readbuf
!= NULL
)
1394 l
= dcache_xfer_memory (ops
, dcache
, memaddr
, readbuf
, reg_len
, 0);
1396 /* FIXME drow/2006-08-09: If we're going to preserve const
1397 correctness dcache_xfer_memory should take readbuf and
1399 l
= dcache_xfer_memory (ops
, dcache
, memaddr
, (void *) writebuf
,
1402 return TARGET_XFER_E_IO
;
1405 *xfered_len
= (ULONGEST
) l
;
1406 return TARGET_XFER_OK
;
1410 /* If none of those methods found the memory we wanted, fall back
1411 to a target partial transfer. Normally a single call to
1412 to_xfer_partial is enough; if it doesn't recognize an object
1413 it will call the to_xfer_partial of the next target down.
1414 But for memory this won't do. Memory is the only target
1415 object which can be read from more than one valid target.
1416 A core file, for instance, could have some of memory but
1417 delegate other bits to the target below it. So, we must
1418 manually try all targets. */
1420 res
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, memaddr
, reg_len
,
1423 /* Make sure the cache gets updated no matter what - if we are writing
1424 to the stack. Even if this write is not tagged as such, we still need
1425 to update the cache. */
1427 if (res
== TARGET_XFER_OK
1430 && target_dcache_init_p ()
1431 && !region
->attrib
.cache
1432 && ((stack_cache_enabled_p () && object
!= TARGET_OBJECT_STACK_MEMORY
)
1433 || (code_cache_enabled_p () && object
!= TARGET_OBJECT_CODE_MEMORY
)))
1435 DCACHE
*dcache
= target_dcache_get ();
1437 dcache_update (dcache
, memaddr
, (void *) writebuf
, reg_len
);
1440 /* If we still haven't got anything, return the last error. We
1445 /* Perform a partial memory transfer. For docs see target.h,
1448 static enum target_xfer_status
1449 memory_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1450 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1451 ULONGEST memaddr
, ULONGEST len
, ULONGEST
*xfered_len
)
1453 enum target_xfer_status res
;
1455 /* Zero length requests are ok and require no work. */
1457 return TARGET_XFER_EOF
;
1459 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1460 breakpoint insns, thus hiding out from higher layers whether
1461 there are software breakpoints inserted in the code stream. */
1462 if (readbuf
!= NULL
)
1464 res
= memory_xfer_partial_1 (ops
, object
, readbuf
, NULL
, memaddr
, len
,
1467 if (res
== TARGET_XFER_OK
&& !show_memory_breakpoints
)
1468 breakpoint_xfer_memory (readbuf
, NULL
, NULL
, memaddr
, res
);
1473 struct cleanup
*old_chain
;
1475 /* A large write request is likely to be partially satisfied
1476 by memory_xfer_partial_1. We will continually malloc
1477 and free a copy of the entire write request for breakpoint
1478 shadow handling even though we only end up writing a small
1479 subset of it. Cap writes to 4KB to mitigate this. */
1480 len
= min (4096, len
);
1482 buf
= xmalloc (len
);
1483 old_chain
= make_cleanup (xfree
, buf
);
1484 memcpy (buf
, writebuf
, len
);
1486 breakpoint_xfer_memory (NULL
, buf
, writebuf
, memaddr
, len
);
1487 res
= memory_xfer_partial_1 (ops
, object
, NULL
, buf
, memaddr
, len
,
1490 do_cleanups (old_chain
);
1497 restore_show_memory_breakpoints (void *arg
)
1499 show_memory_breakpoints
= (uintptr_t) arg
;
1503 make_show_memory_breakpoints_cleanup (int show
)
1505 int current
= show_memory_breakpoints
;
1507 show_memory_breakpoints
= show
;
1508 return make_cleanup (restore_show_memory_breakpoints
,
1509 (void *) (uintptr_t) current
);
1512 /* For docs see target.h, to_xfer_partial. */
1514 enum target_xfer_status
1515 target_xfer_partial (struct target_ops
*ops
,
1516 enum target_object object
, const char *annex
,
1517 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1518 ULONGEST offset
, ULONGEST len
,
1519 ULONGEST
*xfered_len
)
1521 enum target_xfer_status retval
;
1523 gdb_assert (ops
->to_xfer_partial
!= NULL
);
1525 /* Transfer is done when LEN is zero. */
1527 return TARGET_XFER_EOF
;
1529 if (writebuf
&& !may_write_memory
)
1530 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1531 core_addr_to_string_nz (offset
), plongest (len
));
1535 /* If this is a memory transfer, let the memory-specific code
1536 have a look at it instead. Memory transfers are more
1538 if (object
== TARGET_OBJECT_MEMORY
|| object
== TARGET_OBJECT_STACK_MEMORY
1539 || object
== TARGET_OBJECT_CODE_MEMORY
)
1540 retval
= memory_xfer_partial (ops
, object
, readbuf
,
1541 writebuf
, offset
, len
, xfered_len
);
1542 else if (object
== TARGET_OBJECT_RAW_MEMORY
)
1544 /* Request the normal memory object from other layers. */
1545 retval
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, offset
, len
,
1549 retval
= ops
->to_xfer_partial (ops
, object
, annex
, readbuf
,
1550 writebuf
, offset
, len
, xfered_len
);
1554 const unsigned char *myaddr
= NULL
;
1556 fprintf_unfiltered (gdb_stdlog
,
1557 "%s:target_xfer_partial "
1558 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1561 (annex
? annex
: "(null)"),
1562 host_address_to_string (readbuf
),
1563 host_address_to_string (writebuf
),
1564 core_addr_to_string_nz (offset
),
1565 pulongest (len
), retval
,
1566 pulongest (*xfered_len
));
1572 if (retval
== TARGET_XFER_OK
&& myaddr
!= NULL
)
1576 fputs_unfiltered (", bytes =", gdb_stdlog
);
1577 for (i
= 0; i
< *xfered_len
; i
++)
1579 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
1581 if (targetdebug
< 2 && i
> 0)
1583 fprintf_unfiltered (gdb_stdlog
, " ...");
1586 fprintf_unfiltered (gdb_stdlog
, "\n");
1589 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
1593 fputc_unfiltered ('\n', gdb_stdlog
);
1596 /* Check implementations of to_xfer_partial update *XFERED_LEN
1597 properly. Do assertion after printing debug messages, so that we
1598 can find more clues on assertion failure from debugging messages. */
1599 if (retval
== TARGET_XFER_OK
|| retval
== TARGET_XFER_E_UNAVAILABLE
)
1600 gdb_assert (*xfered_len
> 0);
1605 /* Read LEN bytes of target memory at address MEMADDR, placing the
1606 results in GDB's memory at MYADDR. Returns either 0 for success or
1607 TARGET_XFER_E_IO if any error occurs.
1609 If an error occurs, no guarantee is made about the contents of the data at
1610 MYADDR. In particular, the caller should not depend upon partial reads
1611 filling the buffer with good data. There is no way for the caller to know
1612 how much good data might have been transfered anyway. Callers that can
1613 deal with partial reads should call target_read (which will retry until
1614 it makes no progress, and then return how much was transferred). */
1617 target_read_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1619 /* Dispatch to the topmost target, not the flattened current_target.
1620 Memory accesses check target->to_has_(all_)memory, and the
1621 flattened target doesn't inherit those. */
1622 if (target_read (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1623 myaddr
, memaddr
, len
) == len
)
1626 return TARGET_XFER_E_IO
;
1629 /* Like target_read_memory, but specify explicitly that this is a read
1630 from the target's raw memory. That is, this read bypasses the
1631 dcache, breakpoint shadowing, etc. */
1634 target_read_raw_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1636 /* See comment in target_read_memory about why the request starts at
1637 current_target.beneath. */
1638 if (target_read (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1639 myaddr
, memaddr
, len
) == len
)
1642 return TARGET_XFER_E_IO
;
1645 /* Like target_read_memory, but specify explicitly that this is a read from
1646 the target's stack. This may trigger different cache behavior. */
1649 target_read_stack (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1651 /* See comment in target_read_memory about why the request starts at
1652 current_target.beneath. */
1653 if (target_read (current_target
.beneath
, TARGET_OBJECT_STACK_MEMORY
, NULL
,
1654 myaddr
, memaddr
, len
) == len
)
1657 return TARGET_XFER_E_IO
;
1660 /* Like target_read_memory, but specify explicitly that this is a read from
1661 the target's code. This may trigger different cache behavior. */
1664 target_read_code (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1666 /* See comment in target_read_memory about why the request starts at
1667 current_target.beneath. */
1668 if (target_read (current_target
.beneath
, TARGET_OBJECT_CODE_MEMORY
, NULL
,
1669 myaddr
, memaddr
, len
) == len
)
1672 return TARGET_XFER_E_IO
;
1675 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1676 Returns either 0 for success or TARGET_XFER_E_IO if any
1677 error occurs. If an error occurs, no guarantee is made about how
1678 much data got written. Callers that can deal with partial writes
1679 should call target_write. */
1682 target_write_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1684 /* See comment in target_read_memory about why the request starts at
1685 current_target.beneath. */
1686 if (target_write (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1687 myaddr
, memaddr
, len
) == len
)
1690 return TARGET_XFER_E_IO
;
1693 /* Write LEN bytes from MYADDR to target raw memory at address
1694 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1695 if any error occurs. If an error occurs, no guarantee is made
1696 about how much data got written. Callers that can deal with
1697 partial writes should call target_write. */
1700 target_write_raw_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1702 /* See comment in target_read_memory about why the request starts at
1703 current_target.beneath. */
1704 if (target_write (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1705 myaddr
, memaddr
, len
) == len
)
1708 return TARGET_XFER_E_IO
;
1711 /* Fetch the target's memory map. */
1714 target_memory_map (void)
1716 VEC(mem_region_s
) *result
;
1717 struct mem_region
*last_one
, *this_one
;
1719 struct target_ops
*t
;
1722 fprintf_unfiltered (gdb_stdlog
, "target_memory_map ()\n");
1724 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1725 if (t
->to_memory_map
!= NULL
)
1731 result
= t
->to_memory_map (t
);
1735 qsort (VEC_address (mem_region_s
, result
),
1736 VEC_length (mem_region_s
, result
),
1737 sizeof (struct mem_region
), mem_region_cmp
);
1739 /* Check that regions do not overlap. Simultaneously assign
1740 a numbering for the "mem" commands to use to refer to
1743 for (ix
= 0; VEC_iterate (mem_region_s
, result
, ix
, this_one
); ix
++)
1745 this_one
->number
= ix
;
1747 if (last_one
&& last_one
->hi
> this_one
->lo
)
1749 warning (_("Overlapping regions in memory map: ignoring"));
1750 VEC_free (mem_region_s
, result
);
1753 last_one
= this_one
;
1760 target_flash_erase (ULONGEST address
, LONGEST length
)
1762 struct target_ops
*t
;
1764 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1765 if (t
->to_flash_erase
!= NULL
)
1768 fprintf_unfiltered (gdb_stdlog
, "target_flash_erase (%s, %s)\n",
1769 hex_string (address
), phex (length
, 0));
1770 t
->to_flash_erase (t
, address
, length
);
1778 target_flash_done (void)
1780 struct target_ops
*t
;
1782 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1783 if (t
->to_flash_done
!= NULL
)
1786 fprintf_unfiltered (gdb_stdlog
, "target_flash_done\n");
1787 t
->to_flash_done (t
);
1795 show_trust_readonly (struct ui_file
*file
, int from_tty
,
1796 struct cmd_list_element
*c
, const char *value
)
1798 fprintf_filtered (file
,
1799 _("Mode for reading from readonly sections is %s.\n"),
1803 /* More generic transfers. */
1805 static enum target_xfer_status
1806 default_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1807 const char *annex
, gdb_byte
*readbuf
,
1808 const gdb_byte
*writebuf
, ULONGEST offset
, ULONGEST len
,
1809 ULONGEST
*xfered_len
)
1811 if (object
== TARGET_OBJECT_MEMORY
1812 && ops
->deprecated_xfer_memory
!= NULL
)
1813 /* If available, fall back to the target's
1814 "deprecated_xfer_memory" method. */
1819 if (writebuf
!= NULL
)
1821 void *buffer
= xmalloc (len
);
1822 struct cleanup
*cleanup
= make_cleanup (xfree
, buffer
);
1824 memcpy (buffer
, writebuf
, len
);
1825 xfered
= ops
->deprecated_xfer_memory (offset
, buffer
, len
,
1826 1/*write*/, NULL
, ops
);
1827 do_cleanups (cleanup
);
1829 if (readbuf
!= NULL
)
1830 xfered
= ops
->deprecated_xfer_memory (offset
, readbuf
, len
,
1831 0/*read*/, NULL
, ops
);
1834 *xfered_len
= (ULONGEST
) xfered
;
1835 return TARGET_XFER_E_IO
;
1837 else if (xfered
== 0 && errno
== 0)
1838 /* "deprecated_xfer_memory" uses 0, cross checked against
1839 ERRNO as one indication of an error. */
1840 return TARGET_XFER_EOF
;
1842 return TARGET_XFER_E_IO
;
1846 gdb_assert (ops
->beneath
!= NULL
);
1847 return ops
->beneath
->to_xfer_partial (ops
->beneath
, object
, annex
,
1848 readbuf
, writebuf
, offset
, len
,
1853 /* Target vector read/write partial wrapper functions. */
1855 static enum target_xfer_status
1856 target_read_partial (struct target_ops
*ops
,
1857 enum target_object object
,
1858 const char *annex
, gdb_byte
*buf
,
1859 ULONGEST offset
, ULONGEST len
,
1860 ULONGEST
*xfered_len
)
1862 return target_xfer_partial (ops
, object
, annex
, buf
, NULL
, offset
, len
,
1866 static enum target_xfer_status
1867 target_write_partial (struct target_ops
*ops
,
1868 enum target_object object
,
1869 const char *annex
, const gdb_byte
*buf
,
1870 ULONGEST offset
, LONGEST len
, ULONGEST
*xfered_len
)
1872 return target_xfer_partial (ops
, object
, annex
, NULL
, buf
, offset
, len
,
1876 /* Wrappers to perform the full transfer. */
1878 /* For docs on target_read see target.h. */
1881 target_read (struct target_ops
*ops
,
1882 enum target_object object
,
1883 const char *annex
, gdb_byte
*buf
,
1884 ULONGEST offset
, LONGEST len
)
1888 while (xfered
< len
)
1890 ULONGEST xfered_len
;
1891 enum target_xfer_status status
;
1893 status
= target_read_partial (ops
, object
, annex
,
1894 (gdb_byte
*) buf
+ xfered
,
1895 offset
+ xfered
, len
- xfered
,
1898 /* Call an observer, notifying them of the xfer progress? */
1899 if (status
== TARGET_XFER_EOF
)
1901 else if (status
== TARGET_XFER_OK
)
1903 xfered
+= xfered_len
;
1913 /* Assuming that the entire [begin, end) range of memory cannot be
1914 read, try to read whatever subrange is possible to read.
1916 The function returns, in RESULT, either zero or one memory block.
1917 If there's a readable subrange at the beginning, it is completely
1918 read and returned. Any further readable subrange will not be read.
1919 Otherwise, if there's a readable subrange at the end, it will be
1920 completely read and returned. Any readable subranges before it
1921 (obviously, not starting at the beginning), will be ignored. In
1922 other cases -- either no readable subrange, or readable subrange(s)
1923 that is neither at the beginning, or end, nothing is returned.
1925 The purpose of this function is to handle a read across a boundary
1926 of accessible memory in a case when memory map is not available.
1927 The above restrictions are fine for this case, but will give
1928 incorrect results if the memory is 'patchy'. However, supporting
1929 'patchy' memory would require trying to read every single byte,
1930 and it seems unacceptable solution. Explicit memory map is
1931 recommended for this case -- and target_read_memory_robust will
1932 take care of reading multiple ranges then. */
1935 read_whatever_is_readable (struct target_ops
*ops
,
1936 ULONGEST begin
, ULONGEST end
,
1937 VEC(memory_read_result_s
) **result
)
1939 gdb_byte
*buf
= xmalloc (end
- begin
);
1940 ULONGEST current_begin
= begin
;
1941 ULONGEST current_end
= end
;
1943 memory_read_result_s r
;
1944 ULONGEST xfered_len
;
1946 /* If we previously failed to read 1 byte, nothing can be done here. */
1947 if (end
- begin
<= 1)
1953 /* Check that either first or the last byte is readable, and give up
1954 if not. This heuristic is meant to permit reading accessible memory
1955 at the boundary of accessible region. */
1956 if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
1957 buf
, begin
, 1, &xfered_len
) == TARGET_XFER_OK
)
1962 else if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
1963 buf
+ (end
-begin
) - 1, end
- 1, 1,
1964 &xfered_len
) == TARGET_XFER_OK
)
1975 /* Loop invariant is that the [current_begin, current_end) was previously
1976 found to be not readable as a whole.
1978 Note loop condition -- if the range has 1 byte, we can't divide the range
1979 so there's no point trying further. */
1980 while (current_end
- current_begin
> 1)
1982 ULONGEST first_half_begin
, first_half_end
;
1983 ULONGEST second_half_begin
, second_half_end
;
1985 ULONGEST middle
= current_begin
+ (current_end
- current_begin
)/2;
1989 first_half_begin
= current_begin
;
1990 first_half_end
= middle
;
1991 second_half_begin
= middle
;
1992 second_half_end
= current_end
;
1996 first_half_begin
= middle
;
1997 first_half_end
= current_end
;
1998 second_half_begin
= current_begin
;
1999 second_half_end
= middle
;
2002 xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2003 buf
+ (first_half_begin
- begin
),
2005 first_half_end
- first_half_begin
);
2007 if (xfer
== first_half_end
- first_half_begin
)
2009 /* This half reads up fine. So, the error must be in the
2011 current_begin
= second_half_begin
;
2012 current_end
= second_half_end
;
2016 /* This half is not readable. Because we've tried one byte, we
2017 know some part of this half if actually redable. Go to the next
2018 iteration to divide again and try to read.
2020 We don't handle the other half, because this function only tries
2021 to read a single readable subrange. */
2022 current_begin
= first_half_begin
;
2023 current_end
= first_half_end
;
2029 /* The [begin, current_begin) range has been read. */
2031 r
.end
= current_begin
;
2036 /* The [current_end, end) range has been read. */
2037 LONGEST rlen
= end
- current_end
;
2039 r
.data
= xmalloc (rlen
);
2040 memcpy (r
.data
, buf
+ current_end
- begin
, rlen
);
2041 r
.begin
= current_end
;
2045 VEC_safe_push(memory_read_result_s
, (*result
), &r
);
2049 free_memory_read_result_vector (void *x
)
2051 VEC(memory_read_result_s
) *v
= x
;
2052 memory_read_result_s
*current
;
2055 for (ix
= 0; VEC_iterate (memory_read_result_s
, v
, ix
, current
); ++ix
)
2057 xfree (current
->data
);
2059 VEC_free (memory_read_result_s
, v
);
2062 VEC(memory_read_result_s
) *
2063 read_memory_robust (struct target_ops
*ops
, ULONGEST offset
, LONGEST len
)
2065 VEC(memory_read_result_s
) *result
= 0;
2068 while (xfered
< len
)
2070 struct mem_region
*region
= lookup_mem_region (offset
+ xfered
);
2073 /* If there is no explicit region, a fake one should be created. */
2074 gdb_assert (region
);
2076 if (region
->hi
== 0)
2077 rlen
= len
- xfered
;
2079 rlen
= region
->hi
- offset
;
2081 if (region
->attrib
.mode
== MEM_NONE
|| region
->attrib
.mode
== MEM_WO
)
2083 /* Cannot read this region. Note that we can end up here only
2084 if the region is explicitly marked inaccessible, or
2085 'inaccessible-by-default' is in effect. */
2090 LONGEST to_read
= min (len
- xfered
, rlen
);
2091 gdb_byte
*buffer
= (gdb_byte
*)xmalloc (to_read
);
2093 LONGEST xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2094 (gdb_byte
*) buffer
,
2095 offset
+ xfered
, to_read
);
2096 /* Call an observer, notifying them of the xfer progress? */
2099 /* Got an error reading full chunk. See if maybe we can read
2102 read_whatever_is_readable (ops
, offset
+ xfered
,
2103 offset
+ xfered
+ to_read
, &result
);
2108 struct memory_read_result r
;
2110 r
.begin
= offset
+ xfered
;
2111 r
.end
= r
.begin
+ xfer
;
2112 VEC_safe_push (memory_read_result_s
, result
, &r
);
2122 /* An alternative to target_write with progress callbacks. */
2125 target_write_with_progress (struct target_ops
*ops
,
2126 enum target_object object
,
2127 const char *annex
, const gdb_byte
*buf
,
2128 ULONGEST offset
, LONGEST len
,
2129 void (*progress
) (ULONGEST
, void *), void *baton
)
2133 /* Give the progress callback a chance to set up. */
2135 (*progress
) (0, baton
);
2137 while (xfered
< len
)
2139 ULONGEST xfered_len
;
2140 enum target_xfer_status status
;
2142 status
= target_write_partial (ops
, object
, annex
,
2143 (gdb_byte
*) buf
+ xfered
,
2144 offset
+ xfered
, len
- xfered
,
2147 if (status
== TARGET_XFER_EOF
)
2149 if (TARGET_XFER_STATUS_ERROR_P (status
))
2152 gdb_assert (status
== TARGET_XFER_OK
);
2154 (*progress
) (xfered_len
, baton
);
2156 xfered
+= xfered_len
;
2162 /* For docs on target_write see target.h. */
2165 target_write (struct target_ops
*ops
,
2166 enum target_object object
,
2167 const char *annex
, const gdb_byte
*buf
,
2168 ULONGEST offset
, LONGEST len
)
2170 return target_write_with_progress (ops
, object
, annex
, buf
, offset
, len
,
2174 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2175 the size of the transferred data. PADDING additional bytes are
2176 available in *BUF_P. This is a helper function for
2177 target_read_alloc; see the declaration of that function for more
2181 target_read_alloc_1 (struct target_ops
*ops
, enum target_object object
,
2182 const char *annex
, gdb_byte
**buf_p
, int padding
)
2184 size_t buf_alloc
, buf_pos
;
2187 /* This function does not have a length parameter; it reads the
2188 entire OBJECT). Also, it doesn't support objects fetched partly
2189 from one target and partly from another (in a different stratum,
2190 e.g. a core file and an executable). Both reasons make it
2191 unsuitable for reading memory. */
2192 gdb_assert (object
!= TARGET_OBJECT_MEMORY
);
2194 /* Start by reading up to 4K at a time. The target will throttle
2195 this number down if necessary. */
2197 buf
= xmalloc (buf_alloc
);
2201 ULONGEST xfered_len
;
2202 enum target_xfer_status status
;
2204 status
= target_read_partial (ops
, object
, annex
, &buf
[buf_pos
],
2205 buf_pos
, buf_alloc
- buf_pos
- padding
,
2208 if (status
== TARGET_XFER_EOF
)
2210 /* Read all there was. */
2217 else if (status
!= TARGET_XFER_OK
)
2219 /* An error occurred. */
2221 return TARGET_XFER_E_IO
;
2224 buf_pos
+= xfered_len
;
2226 /* If the buffer is filling up, expand it. */
2227 if (buf_alloc
< buf_pos
* 2)
2230 buf
= xrealloc (buf
, buf_alloc
);
2237 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2238 the size of the transferred data. See the declaration in "target.h"
2239 function for more information about the return value. */
2242 target_read_alloc (struct target_ops
*ops
, enum target_object object
,
2243 const char *annex
, gdb_byte
**buf_p
)
2245 return target_read_alloc_1 (ops
, object
, annex
, buf_p
, 0);
2248 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2249 returned as a string, allocated using xmalloc. If an error occurs
2250 or the transfer is unsupported, NULL is returned. Empty objects
2251 are returned as allocated but empty strings. A warning is issued
2252 if the result contains any embedded NUL bytes. */
2255 target_read_stralloc (struct target_ops
*ops
, enum target_object object
,
2260 LONGEST i
, transferred
;
2262 transferred
= target_read_alloc_1 (ops
, object
, annex
, &buffer
, 1);
2263 bufstr
= (char *) buffer
;
2265 if (transferred
< 0)
2268 if (transferred
== 0)
2269 return xstrdup ("");
2271 bufstr
[transferred
] = 0;
2273 /* Check for embedded NUL bytes; but allow trailing NULs. */
2274 for (i
= strlen (bufstr
); i
< transferred
; i
++)
2277 warning (_("target object %d, annex %s, "
2278 "contained unexpected null characters"),
2279 (int) object
, annex
? annex
: "(none)");
2286 /* Memory transfer methods. */
2289 get_target_memory (struct target_ops
*ops
, CORE_ADDR addr
, gdb_byte
*buf
,
2292 /* This method is used to read from an alternate, non-current
2293 target. This read must bypass the overlay support (as symbols
2294 don't match this target), and GDB's internal cache (wrong cache
2295 for this target). */
2296 if (target_read (ops
, TARGET_OBJECT_RAW_MEMORY
, NULL
, buf
, addr
, len
)
2298 memory_error (TARGET_XFER_E_IO
, addr
);
2302 get_target_memory_unsigned (struct target_ops
*ops
, CORE_ADDR addr
,
2303 int len
, enum bfd_endian byte_order
)
2305 gdb_byte buf
[sizeof (ULONGEST
)];
2307 gdb_assert (len
<= sizeof (buf
));
2308 get_target_memory (ops
, addr
, buf
, len
);
2309 return extract_unsigned_integer (buf
, len
, byte_order
);
2315 target_insert_breakpoint (struct gdbarch
*gdbarch
,
2316 struct bp_target_info
*bp_tgt
)
2318 if (!may_insert_breakpoints
)
2320 warning (_("May not insert breakpoints"));
2324 return current_target
.to_insert_breakpoint (¤t_target
,
2331 target_remove_breakpoint (struct gdbarch
*gdbarch
,
2332 struct bp_target_info
*bp_tgt
)
2334 /* This is kind of a weird case to handle, but the permission might
2335 have been changed after breakpoints were inserted - in which case
2336 we should just take the user literally and assume that any
2337 breakpoints should be left in place. */
2338 if (!may_insert_breakpoints
)
2340 warning (_("May not remove breakpoints"));
2344 return current_target
.to_remove_breakpoint (¤t_target
,
2349 target_info (char *args
, int from_tty
)
2351 struct target_ops
*t
;
2352 int has_all_mem
= 0;
2354 if (symfile_objfile
!= NULL
)
2355 printf_unfiltered (_("Symbols from \"%s\".\n"),
2356 objfile_name (symfile_objfile
));
2358 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
2360 if (!(*t
->to_has_memory
) (t
))
2363 if ((int) (t
->to_stratum
) <= (int) dummy_stratum
)
2366 printf_unfiltered (_("\tWhile running this, "
2367 "GDB does not access memory from...\n"));
2368 printf_unfiltered ("%s:\n", t
->to_longname
);
2369 (t
->to_files_info
) (t
);
2370 has_all_mem
= (*t
->to_has_all_memory
) (t
);
2374 /* This function is called before any new inferior is created, e.g.
2375 by running a program, attaching, or connecting to a target.
2376 It cleans up any state from previous invocations which might
2377 change between runs. This is a subset of what target_preopen
2378 resets (things which might change between targets). */
2381 target_pre_inferior (int from_tty
)
2383 /* Clear out solib state. Otherwise the solib state of the previous
2384 inferior might have survived and is entirely wrong for the new
2385 target. This has been observed on GNU/Linux using glibc 2.3. How
2397 Cannot access memory at address 0xdeadbeef
2400 /* In some OSs, the shared library list is the same/global/shared
2401 across inferiors. If code is shared between processes, so are
2402 memory regions and features. */
2403 if (!gdbarch_has_global_solist (target_gdbarch ()))
2405 no_shared_libraries (NULL
, from_tty
);
2407 invalidate_target_mem_regions ();
2409 target_clear_description ();
2412 agent_capability_invalidate ();
2415 /* Callback for iterate_over_inferiors. Gets rid of the given
2419 dispose_inferior (struct inferior
*inf
, void *args
)
2421 struct thread_info
*thread
;
2423 thread
= any_thread_of_process (inf
->pid
);
2426 switch_to_thread (thread
->ptid
);
2428 /* Core inferiors actually should be detached, not killed. */
2429 if (target_has_execution
)
2432 target_detach (NULL
, 0);
2438 /* This is to be called by the open routine before it does
2442 target_preopen (int from_tty
)
2446 if (have_inferiors ())
2449 || !have_live_inferiors ()
2450 || query (_("A program is being debugged already. Kill it? ")))
2451 iterate_over_inferiors (dispose_inferior
, NULL
);
2453 error (_("Program not killed."));
2456 /* Calling target_kill may remove the target from the stack. But if
2457 it doesn't (which seems like a win for UDI), remove it now. */
2458 /* Leave the exec target, though. The user may be switching from a
2459 live process to a core of the same program. */
2460 pop_all_targets_above (file_stratum
);
2462 target_pre_inferior (from_tty
);
2465 /* Detach a target after doing deferred register stores. */
2468 target_detach (const char *args
, int from_tty
)
2470 struct target_ops
* t
;
2472 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2473 /* Don't remove global breakpoints here. They're removed on
2474 disconnection from the target. */
2477 /* If we're in breakpoints-always-inserted mode, have to remove
2478 them before detaching. */
2479 remove_breakpoints_pid (ptid_get_pid (inferior_ptid
));
2481 prepare_for_detach ();
2483 current_target
.to_detach (¤t_target
, args
, from_tty
);
2485 fprintf_unfiltered (gdb_stdlog
, "target_detach (%s, %d)\n",
2490 target_disconnect (char *args
, int from_tty
)
2492 struct target_ops
*t
;
2494 /* If we're in breakpoints-always-inserted mode or if breakpoints
2495 are global across processes, we have to remove them before
2497 remove_breakpoints ();
2499 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2500 if (t
->to_disconnect
!= NULL
)
2503 fprintf_unfiltered (gdb_stdlog
, "target_disconnect (%s, %d)\n",
2505 t
->to_disconnect (t
, args
, from_tty
);
2513 target_wait (ptid_t ptid
, struct target_waitstatus
*status
, int options
)
2515 struct target_ops
*t
;
2516 ptid_t retval
= (current_target
.to_wait
) (¤t_target
, ptid
,
2521 char *status_string
;
2522 char *options_string
;
2524 status_string
= target_waitstatus_to_string (status
);
2525 options_string
= target_options_to_string (options
);
2526 fprintf_unfiltered (gdb_stdlog
,
2527 "target_wait (%d, status, options={%s})"
2529 ptid_get_pid (ptid
), options_string
,
2530 ptid_get_pid (retval
), status_string
);
2531 xfree (status_string
);
2532 xfree (options_string
);
2539 target_pid_to_str (ptid_t ptid
)
2541 struct target_ops
*t
;
2543 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2545 if (t
->to_pid_to_str
!= NULL
)
2546 return (*t
->to_pid_to_str
) (t
, ptid
);
2549 return normal_pid_to_str (ptid
);
2553 target_thread_name (struct thread_info
*info
)
2555 return current_target
.to_thread_name (¤t_target
, info
);
2559 target_resume (ptid_t ptid
, int step
, enum gdb_signal signal
)
2561 struct target_ops
*t
;
2563 target_dcache_invalidate ();
2565 current_target
.to_resume (¤t_target
, ptid
, step
, signal
);
2567 fprintf_unfiltered (gdb_stdlog
, "target_resume (%d, %s, %s)\n",
2568 ptid_get_pid (ptid
),
2569 step
? "step" : "continue",
2570 gdb_signal_to_name (signal
));
2572 registers_changed_ptid (ptid
);
2573 set_executing (ptid
, 1);
2574 set_running (ptid
, 1);
2575 clear_inline_frame_state (ptid
);
2579 target_pass_signals (int numsigs
, unsigned char *pass_signals
)
2581 struct target_ops
*t
;
2583 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2585 if (t
->to_pass_signals
!= NULL
)
2591 fprintf_unfiltered (gdb_stdlog
, "target_pass_signals (%d, {",
2594 for (i
= 0; i
< numsigs
; i
++)
2595 if (pass_signals
[i
])
2596 fprintf_unfiltered (gdb_stdlog
, " %s",
2597 gdb_signal_to_name (i
));
2599 fprintf_unfiltered (gdb_stdlog
, " })\n");
2602 (*t
->to_pass_signals
) (t
, numsigs
, pass_signals
);
2609 target_program_signals (int numsigs
, unsigned char *program_signals
)
2611 struct target_ops
*t
;
2613 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2615 if (t
->to_program_signals
!= NULL
)
2621 fprintf_unfiltered (gdb_stdlog
, "target_program_signals (%d, {",
2624 for (i
= 0; i
< numsigs
; i
++)
2625 if (program_signals
[i
])
2626 fprintf_unfiltered (gdb_stdlog
, " %s",
2627 gdb_signal_to_name (i
));
2629 fprintf_unfiltered (gdb_stdlog
, " })\n");
2632 (*t
->to_program_signals
) (t
, numsigs
, program_signals
);
/* Default to_follow_fork implementation: reaching here means a target
   reported a fork event without knowing how to follow it.  */

static int
default_follow_fork (struct target_ops *self, int follow_child,
		     int detach_fork)
{
  /* Some target returned a fork event, but did not know how to follow it.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to follow fork"));
}
2647 /* Look through the list of possible targets for a target that can
2651 target_follow_fork (int follow_child
, int detach_fork
)
2653 int retval
= current_target
.to_follow_fork (¤t_target
,
2654 follow_child
, detach_fork
);
2657 fprintf_unfiltered (gdb_stdlog
,
2658 "target_follow_fork (%d, %d) = %d\n",
2659 follow_child
, detach_fork
, retval
);
2664 target_mourn_inferior (void)
2666 struct target_ops
*t
;
2668 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2670 if (t
->to_mourn_inferior
!= NULL
)
2672 t
->to_mourn_inferior (t
);
2674 fprintf_unfiltered (gdb_stdlog
, "target_mourn_inferior ()\n");
2676 /* We no longer need to keep handles on any of the object files.
2677 Make sure to release them to avoid unnecessarily locking any
2678 of them while we're not actually debugging. */
2679 bfd_cache_close_all ();
2685 internal_error (__FILE__
, __LINE__
,
2686 _("could not find a target to follow mourn inferior"));
2689 /* Look for a target which can describe architectural features, starting
2690 from TARGET. If we find one, return its description. */
2692 const struct target_desc
*
2693 target_read_description (struct target_ops
*target
)
2695 struct target_ops
*t
;
2697 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
2698 if (t
->to_read_description
!= NULL
)
2700 const struct target_desc
*tdesc
;
2702 tdesc
= t
->to_read_description (t
);
2710 /* The default implementation of to_search_memory.
2711 This implements a basic search of memory, reading target memory and
2712 performing the search here (as opposed to performing the search in on the
2713 target side with, for example, gdbserver). */
2716 simple_search_memory (struct target_ops
*ops
,
2717 CORE_ADDR start_addr
, ULONGEST search_space_len
,
2718 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2719 CORE_ADDR
*found_addrp
)
2721 /* NOTE: also defined in find.c testcase. */
2722 #define SEARCH_CHUNK_SIZE 16000
2723 const unsigned chunk_size
= SEARCH_CHUNK_SIZE
;
2724 /* Buffer to hold memory contents for searching. */
2725 gdb_byte
*search_buf
;
2726 unsigned search_buf_size
;
2727 struct cleanup
*old_cleanups
;
2729 search_buf_size
= chunk_size
+ pattern_len
- 1;
2731 /* No point in trying to allocate a buffer larger than the search space. */
2732 if (search_space_len
< search_buf_size
)
2733 search_buf_size
= search_space_len
;
2735 search_buf
= malloc (search_buf_size
);
2736 if (search_buf
== NULL
)
2737 error (_("Unable to allocate memory to perform the search."));
2738 old_cleanups
= make_cleanup (free_current_contents
, &search_buf
);
2740 /* Prime the search buffer. */
2742 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2743 search_buf
, start_addr
, search_buf_size
) != search_buf_size
)
2745 warning (_("Unable to access %s bytes of target "
2746 "memory at %s, halting search."),
2747 pulongest (search_buf_size
), hex_string (start_addr
));
2748 do_cleanups (old_cleanups
);
2752 /* Perform the search.
2754 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2755 When we've scanned N bytes we copy the trailing bytes to the start and
2756 read in another N bytes. */
2758 while (search_space_len
>= pattern_len
)
2760 gdb_byte
*found_ptr
;
2761 unsigned nr_search_bytes
= min (search_space_len
, search_buf_size
);
2763 found_ptr
= memmem (search_buf
, nr_search_bytes
,
2764 pattern
, pattern_len
);
2766 if (found_ptr
!= NULL
)
2768 CORE_ADDR found_addr
= start_addr
+ (found_ptr
- search_buf
);
2770 *found_addrp
= found_addr
;
2771 do_cleanups (old_cleanups
);
2775 /* Not found in this chunk, skip to next chunk. */
2777 /* Don't let search_space_len wrap here, it's unsigned. */
2778 if (search_space_len
>= chunk_size
)
2779 search_space_len
-= chunk_size
;
2781 search_space_len
= 0;
2783 if (search_space_len
>= pattern_len
)
2785 unsigned keep_len
= search_buf_size
- chunk_size
;
2786 CORE_ADDR read_addr
= start_addr
+ chunk_size
+ keep_len
;
2789 /* Copy the trailing part of the previous iteration to the front
2790 of the buffer for the next iteration. */
2791 gdb_assert (keep_len
== pattern_len
- 1);
2792 memcpy (search_buf
, search_buf
+ chunk_size
, keep_len
);
2794 nr_to_read
= min (search_space_len
- keep_len
, chunk_size
);
2796 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2797 search_buf
+ keep_len
, read_addr
,
2798 nr_to_read
) != nr_to_read
)
2800 warning (_("Unable to access %s bytes of target "
2801 "memory at %s, halting search."),
2802 plongest (nr_to_read
),
2803 hex_string (read_addr
));
2804 do_cleanups (old_cleanups
);
2808 start_addr
+= chunk_size
;
2814 do_cleanups (old_cleanups
);
2818 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2819 sequence of bytes in PATTERN with length PATTERN_LEN.
2821 The result is 1 if found, 0 if not found, and -1 if there was an error
2822 requiring halting of the search (e.g. memory read error).
2823 If the pattern is found the address is recorded in FOUND_ADDRP. */
2826 target_search_memory (CORE_ADDR start_addr
, ULONGEST search_space_len
,
2827 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2828 CORE_ADDR
*found_addrp
)
2830 struct target_ops
*t
;
2833 /* We don't use INHERIT to set current_target.to_search_memory,
2834 so we have to scan the target stack and handle targetdebug
2838 fprintf_unfiltered (gdb_stdlog
, "target_search_memory (%s, ...)\n",
2839 hex_string (start_addr
));
2841 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2842 if (t
->to_search_memory
!= NULL
)
2847 found
= t
->to_search_memory (t
, start_addr
, search_space_len
,
2848 pattern
, pattern_len
, found_addrp
);
2852 /* If a special version of to_search_memory isn't available, use the
2854 found
= simple_search_memory (current_target
.beneath
,
2855 start_addr
, search_space_len
,
2856 pattern
, pattern_len
, found_addrp
);
2860 fprintf_unfiltered (gdb_stdlog
, " = %d\n", found
);
2865 /* Look through the currently pushed targets. If none of them will
2866 be able to restart the currently running process, issue an error
2870 target_require_runnable (void)
2872 struct target_ops
*t
;
2874 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
2876 /* If this target knows how to create a new program, then
2877 assume we will still be able to after killing the current
2878 one. Either killing and mourning will not pop T, or else
2879 find_default_run_target will find it again. */
2880 if (t
->to_create_inferior
!= NULL
)
2883 /* Do not worry about thread_stratum targets that can not
2884 create inferiors. Assume they will be pushed again if
2885 necessary, and continue to the process_stratum. */
2886 if (t
->to_stratum
== thread_stratum
2887 || t
->to_stratum
== arch_stratum
)
2890 error (_("The \"%s\" target does not support \"run\". "
2891 "Try \"help target\" or \"continue\"."),
2895 /* This function is only called if the target is running. In that
2896 case there should have been a process_stratum target and it
2897 should either know how to create inferiors, or not... */
2898 internal_error (__FILE__
, __LINE__
, _("No targets found"));
2901 /* Look through the list of possible targets for a target that can
2902 execute a run or attach command without any other data. This is
2903 used to locate the default process stratum.
2905 If DO_MESG is not NULL, the result is always valid (error() is
2906 called for errors); else, return NULL on error. */
2908 static struct target_ops
*
2909 find_default_run_target (char *do_mesg
)
2911 struct target_ops
**t
;
2912 struct target_ops
*runable
= NULL
;
2917 for (t
= target_structs
; t
< target_structs
+ target_struct_size
;
2920 if ((*t
)->to_can_run
&& target_can_run (*t
))
2930 error (_("Don't know how to %s. Try \"help target\"."), do_mesg
);
2939 find_default_attach (struct target_ops
*ops
, char *args
, int from_tty
)
2941 struct target_ops
*t
;
2943 t
= find_default_run_target ("attach");
2944 (t
->to_attach
) (t
, args
, from_tty
);
2949 find_default_create_inferior (struct target_ops
*ops
,
2950 char *exec_file
, char *allargs
, char **env
,
2953 struct target_ops
*t
;
2955 t
= find_default_run_target ("run");
2956 (t
->to_create_inferior
) (t
, exec_file
, allargs
, env
, from_tty
);
2961 find_default_can_async_p (struct target_ops
*ignore
)
2963 struct target_ops
*t
;
2965 /* This may be called before the target is pushed on the stack;
2966 look for the default process stratum. If there's none, gdb isn't
2967 configured with a native debugger, and target remote isn't
2969 t
= find_default_run_target (NULL
);
2970 if (t
&& t
->to_can_async_p
!= delegate_can_async_p
)
2971 return (t
->to_can_async_p
) (t
);
2976 find_default_is_async_p (struct target_ops
*ignore
)
2978 struct target_ops
*t
;
2980 /* This may be called before the target is pushed on the stack;
2981 look for the default process stratum. If there's none, gdb isn't
2982 configured with a native debugger, and target remote isn't
2984 t
= find_default_run_target (NULL
);
2985 if (t
&& t
->to_is_async_p
!= delegate_is_async_p
)
2986 return (t
->to_is_async_p
) (t
);
2991 find_default_supports_non_stop (struct target_ops
*self
)
2993 struct target_ops
*t
;
2995 t
= find_default_run_target (NULL
);
2996 if (t
&& t
->to_supports_non_stop
)
2997 return (t
->to_supports_non_stop
) (t
);
3002 target_supports_non_stop (void)
3004 struct target_ops
*t
;
3006 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3007 if (t
->to_supports_non_stop
)
3008 return t
->to_supports_non_stop (t
);
3013 /* Implement the "info proc" command. */
3016 target_info_proc (char *args
, enum info_proc_what what
)
3018 struct target_ops
*t
;
3020 /* If we're already connected to something that can get us OS
3021 related data, use it. Otherwise, try using the native
3023 if (current_target
.to_stratum
>= process_stratum
)
3024 t
= current_target
.beneath
;
3026 t
= find_default_run_target (NULL
);
3028 for (; t
!= NULL
; t
= t
->beneath
)
3030 if (t
->to_info_proc
!= NULL
)
3032 t
->to_info_proc (t
, args
, what
);
3035 fprintf_unfiltered (gdb_stdlog
,
3036 "target_info_proc (\"%s\", %d)\n", args
, what
);
3046 find_default_supports_disable_randomization (struct target_ops
*self
)
3048 struct target_ops
*t
;
3050 t
= find_default_run_target (NULL
);
3051 if (t
&& t
->to_supports_disable_randomization
)
3052 return (t
->to_supports_disable_randomization
) (t
);
3057 target_supports_disable_randomization (void)
3059 struct target_ops
*t
;
3061 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3062 if (t
->to_supports_disable_randomization
)
3063 return t
->to_supports_disable_randomization (t
);
3069 target_get_osdata (const char *type
)
3071 struct target_ops
*t
;
3073 /* If we're already connected to something that can get us OS
3074 related data, use it. Otherwise, try using the native
3076 if (current_target
.to_stratum
>= process_stratum
)
3077 t
= current_target
.beneath
;
3079 t
= find_default_run_target ("get OS data");
3084 return target_read_stralloc (t
, TARGET_OBJECT_OSDATA
, type
);
3087 /* Determine the current address space of thread PTID. */
3089 struct address_space
*
3090 target_thread_address_space (ptid_t ptid
)
3092 struct address_space
*aspace
;
3093 struct inferior
*inf
;
3094 struct target_ops
*t
;
3096 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3098 if (t
->to_thread_address_space
!= NULL
)
3100 aspace
= t
->to_thread_address_space (t
, ptid
);
3101 gdb_assert (aspace
);
3104 fprintf_unfiltered (gdb_stdlog
,
3105 "target_thread_address_space (%s) = %d\n",
3106 target_pid_to_str (ptid
),
3107 address_space_num (aspace
));
3112 /* Fall-back to the "main" address space of the inferior. */
3113 inf
= find_inferior_pid (ptid_get_pid (ptid
));
3115 if (inf
== NULL
|| inf
->aspace
== NULL
)
3116 internal_error (__FILE__
, __LINE__
,
3117 _("Can't determine the current "
3118 "address space of thread %s\n"),
3119 target_pid_to_str (ptid
));
3125 /* Target file operations. */
3127 static struct target_ops
*
3128 default_fileio_target (void)
3130 /* If we're already connected to something that can perform
3131 file I/O, use it. Otherwise, try using the native target. */
3132 if (current_target
.to_stratum
>= process_stratum
)
3133 return current_target
.beneath
;
3135 return find_default_run_target ("file I/O");
3138 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3139 target file descriptor, or -1 if an error occurs (and set
3142 target_fileio_open (const char *filename
, int flags
, int mode
,
3145 struct target_ops
*t
;
3147 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3149 if (t
->to_fileio_open
!= NULL
)
3151 int fd
= t
->to_fileio_open (t
, filename
, flags
, mode
, target_errno
);
3154 fprintf_unfiltered (gdb_stdlog
,
3155 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3156 filename
, flags
, mode
,
3157 fd
, fd
!= -1 ? 0 : *target_errno
);
3162 *target_errno
= FILEIO_ENOSYS
;
3166 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3167 Return the number of bytes written, or -1 if an error occurs
3168 (and set *TARGET_ERRNO). */
3170 target_fileio_pwrite (int fd
, const gdb_byte
*write_buf
, int len
,
3171 ULONGEST offset
, int *target_errno
)
3173 struct target_ops
*t
;
3175 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3177 if (t
->to_fileio_pwrite
!= NULL
)
3179 int ret
= t
->to_fileio_pwrite (t
, fd
, write_buf
, len
, offset
,
3183 fprintf_unfiltered (gdb_stdlog
,
3184 "target_fileio_pwrite (%d,...,%d,%s) "
3186 fd
, len
, pulongest (offset
),
3187 ret
, ret
!= -1 ? 0 : *target_errno
);
3192 *target_errno
= FILEIO_ENOSYS
;
3196 /* Read up to LEN bytes FD on the target into READ_BUF.
3197 Return the number of bytes read, or -1 if an error occurs
3198 (and set *TARGET_ERRNO). */
3200 target_fileio_pread (int fd
, gdb_byte
*read_buf
, int len
,
3201 ULONGEST offset
, int *target_errno
)
3203 struct target_ops
*t
;
3205 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3207 if (t
->to_fileio_pread
!= NULL
)
3209 int ret
= t
->to_fileio_pread (t
, fd
, read_buf
, len
, offset
,
3213 fprintf_unfiltered (gdb_stdlog
,
3214 "target_fileio_pread (%d,...,%d,%s) "
3216 fd
, len
, pulongest (offset
),
3217 ret
, ret
!= -1 ? 0 : *target_errno
);
3222 *target_errno
= FILEIO_ENOSYS
;
3226 /* Close FD on the target. Return 0, or -1 if an error occurs
3227 (and set *TARGET_ERRNO). */
3229 target_fileio_close (int fd
, int *target_errno
)
3231 struct target_ops
*t
;
3233 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3235 if (t
->to_fileio_close
!= NULL
)
3237 int ret
= t
->to_fileio_close (t
, fd
, target_errno
);
3240 fprintf_unfiltered (gdb_stdlog
,
3241 "target_fileio_close (%d) = %d (%d)\n",
3242 fd
, ret
, ret
!= -1 ? 0 : *target_errno
);
3247 *target_errno
= FILEIO_ENOSYS
;
3251 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3252 occurs (and set *TARGET_ERRNO). */
3254 target_fileio_unlink (const char *filename
, int *target_errno
)
3256 struct target_ops
*t
;
3258 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3260 if (t
->to_fileio_unlink
!= NULL
)
3262 int ret
= t
->to_fileio_unlink (t
, filename
, target_errno
);
3265 fprintf_unfiltered (gdb_stdlog
,
3266 "target_fileio_unlink (%s) = %d (%d)\n",
3267 filename
, ret
, ret
!= -1 ? 0 : *target_errno
);
3272 *target_errno
= FILEIO_ENOSYS
;
3276 /* Read value of symbolic link FILENAME on the target. Return a
3277 null-terminated string allocated via xmalloc, or NULL if an error
3278 occurs (and set *TARGET_ERRNO). */
3280 target_fileio_readlink (const char *filename
, int *target_errno
)
3282 struct target_ops
*t
;
3284 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3286 if (t
->to_fileio_readlink
!= NULL
)
3288 char *ret
= t
->to_fileio_readlink (t
, filename
, target_errno
);
3291 fprintf_unfiltered (gdb_stdlog
,
3292 "target_fileio_readlink (%s) = %s (%d)\n",
3293 filename
, ret
? ret
: "(nil)",
3294 ret
? 0 : *target_errno
);
3299 *target_errno
= FILEIO_ENOSYS
;
/* Cleanup callback: close the target fd pointed to by OPAQUE,
   discarding any error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int fd = *(int *) opaque;
  int target_errno;

  target_fileio_close (fd, &target_errno);
}
3312 /* Read target file FILENAME. Store the result in *BUF_P and
3313 return the size of the transferred data. PADDING additional bytes are
3314 available in *BUF_P. This is a helper function for
3315 target_fileio_read_alloc; see the declaration of that function for more
3319 target_fileio_read_alloc_1 (const char *filename
,
3320 gdb_byte
**buf_p
, int padding
)
3322 struct cleanup
*close_cleanup
;
3323 size_t buf_alloc
, buf_pos
;
3329 fd
= target_fileio_open (filename
, FILEIO_O_RDONLY
, 0700, &target_errno
);
3333 close_cleanup
= make_cleanup (target_fileio_close_cleanup
, &fd
);
3335 /* Start by reading up to 4K at a time. The target will throttle
3336 this number down if necessary. */
3338 buf
= xmalloc (buf_alloc
);
3342 n
= target_fileio_pread (fd
, &buf
[buf_pos
],
3343 buf_alloc
- buf_pos
- padding
, buf_pos
,
3347 /* An error occurred. */
3348 do_cleanups (close_cleanup
);
3354 /* Read all there was. */
3355 do_cleanups (close_cleanup
);
3365 /* If the buffer is filling up, expand it. */
3366 if (buf_alloc
< buf_pos
* 2)
3369 buf
= xrealloc (buf
, buf_alloc
);
3376 /* Read target file FILENAME. Store the result in *BUF_P and return
3377 the size of the transferred data. See the declaration in "target.h"
3378 function for more information about the return value. */
3381 target_fileio_read_alloc (const char *filename
, gdb_byte
**buf_p
)
3383 return target_fileio_read_alloc_1 (filename
, buf_p
, 0);
3386 /* Read target file FILENAME. The result is NUL-terminated and
3387 returned as a string, allocated using xmalloc. If an error occurs
3388 or the transfer is unsupported, NULL is returned. Empty objects
3389 are returned as allocated but empty strings. A warning is issued
3390 if the result contains any embedded NUL bytes. */
3393 target_fileio_read_stralloc (const char *filename
)
3397 LONGEST i
, transferred
;
3399 transferred
= target_fileio_read_alloc_1 (filename
, &buffer
, 1);
3400 bufstr
= (char *) buffer
;
3402 if (transferred
< 0)
3405 if (transferred
== 0)
3406 return xstrdup ("");
3408 bufstr
[transferred
] = 0;
3410 /* Check for embedded NUL bytes; but allow trailing NULs. */
3411 for (i
= strlen (bufstr
); i
< transferred
; i
++)
3414 warning (_("target file %s "
3415 "contained unexpected null characters"),
3425 default_region_ok_for_hw_watchpoint (struct target_ops
*self
,
3426 CORE_ADDR addr
, int len
)
3428 return (len
<= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT
);
3432 default_watchpoint_addr_within_range (struct target_ops
*target
,
3434 CORE_ADDR start
, int length
)
3436 return addr
>= start
&& addr
< start
+ length
;
3439 static struct gdbarch
*
3440 default_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
3442 return target_gdbarch ();
3452 * Find the next target down the stack from the specified target.
3456 find_target_beneath (struct target_ops
*t
)
3464 find_target_at (enum strata stratum
)
3466 struct target_ops
*t
;
3468 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3469 if (t
->to_stratum
== stratum
)
3476 /* The inferior process has died. Long live the inferior! */
3479 generic_mourn_inferior (void)
3483 ptid
= inferior_ptid
;
3484 inferior_ptid
= null_ptid
;
3486 /* Mark breakpoints uninserted in case something tries to delete a
3487 breakpoint while we delete the inferior's threads (which would
3488 fail, since the inferior is long gone). */
3489 mark_breakpoints_out ();
3491 if (!ptid_equal (ptid
, null_ptid
))
3493 int pid
= ptid_get_pid (ptid
);
3494 exit_inferior (pid
);
3497 /* Note this wipes step-resume breakpoints, so needs to be done
3498 after exit_inferior, which ends up referencing the step-resume
3499 breakpoints through clear_thread_inferior_resources. */
3500 breakpoint_init_inferior (inf_exited
);
3502 registers_changed ();
3504 reopen_exec_file ();
3505 reinit_frame_cache ();
3507 if (deprecated_detach_hook
)
3508 deprecated_detach_hook ();
3511 /* Convert a normal process ID to a string. Returns the string in a
3515 normal_pid_to_str (ptid_t ptid
)
3517 static char buf
[32];
3519 xsnprintf (buf
, sizeof buf
, "process %d", ptid_get_pid (ptid
));
3524 dummy_pid_to_str (struct target_ops
*ops
, ptid_t ptid
)
3526 return normal_pid_to_str (ptid
);
3529 /* Error-catcher for target_find_memory_regions. */
3531 dummy_find_memory_regions (struct target_ops
*self
,
3532 find_memory_region_ftype ignore1
, void *ignore2
)
3534 error (_("Command not implemented for this target."));
3538 /* Error-catcher for target_make_corefile_notes. */
3540 dummy_make_corefile_notes (struct target_ops
*self
,
3541 bfd
*ignore1
, int *ignore2
)
3543 error (_("Command not implemented for this target."));
3547 /* Set up the handful of non-empty slots needed by the dummy target
3551 init_dummy_target (void)
3553 dummy_target
.to_shortname
= "None";
3554 dummy_target
.to_longname
= "None";
3555 dummy_target
.to_doc
= "";
3556 dummy_target
.to_create_inferior
= find_default_create_inferior
;
3557 dummy_target
.to_supports_non_stop
= find_default_supports_non_stop
;
3558 dummy_target
.to_supports_disable_randomization
3559 = find_default_supports_disable_randomization
;
3560 dummy_target
.to_pid_to_str
= dummy_pid_to_str
;
3561 dummy_target
.to_stratum
= dummy_stratum
;
3562 dummy_target
.to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
3563 dummy_target
.to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
3564 dummy_target
.to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
3565 dummy_target
.to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
3566 dummy_target
.to_has_execution
3567 = (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
3568 dummy_target
.to_magic
= OPS_MAGIC
;
3570 install_dummy_methods (&dummy_target
);
3574 debug_to_open (char *args
, int from_tty
)
3576 debug_target
.to_open (args
, from_tty
);
3578 fprintf_unfiltered (gdb_stdlog
, "target_open (%s, %d)\n", args
, from_tty
);
3582 target_close (struct target_ops
*targ
)
3584 gdb_assert (!target_is_pushed (targ
));
3586 if (targ
->to_xclose
!= NULL
)
3587 targ
->to_xclose (targ
);
3588 else if (targ
->to_close
!= NULL
)
3589 targ
->to_close (targ
);
3592 fprintf_unfiltered (gdb_stdlog
, "target_close ()\n");
3596 target_attach (char *args
, int from_tty
)
3598 current_target
.to_attach (¤t_target
, args
, from_tty
);
3600 fprintf_unfiltered (gdb_stdlog
, "target_attach (%s, %d)\n",
3605 target_thread_alive (ptid_t ptid
)
3607 struct target_ops
*t
;
3609 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3611 if (t
->to_thread_alive
!= NULL
)
3615 retval
= t
->to_thread_alive (t
, ptid
);
3617 fprintf_unfiltered (gdb_stdlog
, "target_thread_alive (%d) = %d\n",
3618 ptid_get_pid (ptid
), retval
);
3628 target_find_new_threads (void)
3630 struct target_ops
*t
;
3632 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3634 if (t
->to_find_new_threads
!= NULL
)
3636 t
->to_find_new_threads (t
);
3638 fprintf_unfiltered (gdb_stdlog
, "target_find_new_threads ()\n");
3646 target_stop (ptid_t ptid
)
3650 warning (_("May not interrupt or stop the target, ignoring attempt"));
3654 (*current_target
.to_stop
) (¤t_target
, ptid
);
3658 debug_to_post_attach (struct target_ops
*self
, int pid
)
3660 debug_target
.to_post_attach (&debug_target
, pid
);
3662 fprintf_unfiltered (gdb_stdlog
, "target_post_attach (%d)\n", pid
);
3665 /* Concatenate ELEM to LIST, a comma separate list, and return the
3666 result. The LIST incoming argument is released. */
3669 str_comma_list_concat_elem (char *list
, const char *elem
)
3672 return xstrdup (elem
);
3674 return reconcat (list
, list
, ", ", elem
, (char *) NULL
);
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  if ((*target_options & opt) != 0)
    {
      ret = str_comma_list_concat_elem (ret, opt_str);
      *target_options &= ~opt;
    }

  return ret;
}
3696 target_options_to_string (int target_options
)
3700 #define DO_TARG_OPTION(OPT) \
3701 ret = do_option (&target_options, ret, OPT, #OPT)
3703 DO_TARG_OPTION (TARGET_WNOHANG
);
3705 if (target_options
!= 0)
3706 ret
= str_comma_list_concat_elem (ret
, "unknown???");
3714 debug_print_register (const char * func
,
3715 struct regcache
*regcache
, int regno
)
3717 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3719 fprintf_unfiltered (gdb_stdlog
, "%s ", func
);
3720 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
)
3721 && gdbarch_register_name (gdbarch
, regno
) != NULL
3722 && gdbarch_register_name (gdbarch
, regno
)[0] != '\0')
3723 fprintf_unfiltered (gdb_stdlog
, "(%s)",
3724 gdbarch_register_name (gdbarch
, regno
));
3726 fprintf_unfiltered (gdb_stdlog
, "(%d)", regno
);
3727 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
))
3729 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
3730 int i
, size
= register_size (gdbarch
, regno
);
3731 gdb_byte buf
[MAX_REGISTER_SIZE
];
3733 regcache_raw_collect (regcache
, regno
, buf
);
3734 fprintf_unfiltered (gdb_stdlog
, " = ");
3735 for (i
= 0; i
< size
; i
++)
3737 fprintf_unfiltered (gdb_stdlog
, "%02x", buf
[i
]);
3739 if (size
<= sizeof (LONGEST
))
3741 ULONGEST val
= extract_unsigned_integer (buf
, size
, byte_order
);
3743 fprintf_unfiltered (gdb_stdlog
, " %s %s",
3744 core_addr_to_string_nz (val
), plongest (val
));
3747 fprintf_unfiltered (gdb_stdlog
, "\n");
3751 target_fetch_registers (struct regcache
*regcache
, int regno
)
3753 current_target
.to_fetch_registers (¤t_target
, regcache
, regno
);
3755 debug_print_register ("target_fetch_registers", regcache
, regno
);
3759 target_store_registers (struct regcache
*regcache
, int regno
)
3761 struct target_ops
*t
;
3763 if (!may_write_registers
)
3764 error (_("Writing to registers is not allowed (regno %d)"), regno
);
3766 current_target
.to_store_registers (¤t_target
, regcache
, regno
);
3769 debug_print_register ("target_store_registers", regcache
, regno
);
3774 target_core_of_thread (ptid_t ptid
)
3776 struct target_ops
*t
;
3778 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3780 if (t
->to_core_of_thread
!= NULL
)
3782 int retval
= t
->to_core_of_thread (t
, ptid
);
3785 fprintf_unfiltered (gdb_stdlog
,
3786 "target_core_of_thread (%d) = %d\n",
3787 ptid_get_pid (ptid
), retval
);
3796 target_verify_memory (const gdb_byte
*data
, CORE_ADDR memaddr
, ULONGEST size
)
3798 struct target_ops
*t
;
3800 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3802 if (t
->to_verify_memory
!= NULL
)
3804 int retval
= t
->to_verify_memory (t
, data
, memaddr
, size
);
3807 fprintf_unfiltered (gdb_stdlog
,
3808 "target_verify_memory (%s, %s) = %d\n",
3809 paddress (target_gdbarch (), memaddr
),
3819 /* The documentation for this function is in its prototype declaration in
3823 target_insert_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
3827 ret
= current_target
.to_insert_mask_watchpoint (¤t_target
,
3831 fprintf_unfiltered (gdb_stdlog
, "\
3832 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3833 core_addr_to_string (addr
),
3834 core_addr_to_string (mask
), rw
, ret
);
3839 /* The documentation for this function is in its prototype declaration in
3843 target_remove_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
3847 ret
= current_target
.to_remove_mask_watchpoint (¤t_target
,
3851 fprintf_unfiltered (gdb_stdlog
, "\
3852 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
3853 core_addr_to_string (addr
),
3854 core_addr_to_string (mask
), rw
, ret
);
3859 /* The documentation for this function is in its prototype declaration
3863 target_masked_watch_num_registers (CORE_ADDR addr
, CORE_ADDR mask
)
3865 return current_target
.to_masked_watch_num_registers (¤t_target
,
3869 /* The documentation for this function is in its prototype declaration
3873 target_ranged_break_num_registers (void)
3875 return current_target
.to_ranged_break_num_registers (¤t_target
);
3880 struct btrace_target_info
*
3881 target_enable_btrace (ptid_t ptid
)
3883 struct target_ops
*t
;
3885 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3886 if (t
->to_enable_btrace
!= NULL
)
3887 return t
->to_enable_btrace (t
, ptid
);
3896 target_disable_btrace (struct btrace_target_info
*btinfo
)
3898 struct target_ops
*t
;
3900 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3901 if (t
->to_disable_btrace
!= NULL
)
3903 t
->to_disable_btrace (t
, btinfo
);
3913 target_teardown_btrace (struct btrace_target_info
*btinfo
)
3915 struct target_ops
*t
;
3917 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3918 if (t
->to_teardown_btrace
!= NULL
)
3920 t
->to_teardown_btrace (t
, btinfo
);
3930 target_read_btrace (VEC (btrace_block_s
) **btrace
,
3931 struct btrace_target_info
*btinfo
,
3932 enum btrace_read_type type
)
3934 struct target_ops
*t
;
3936 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3937 if (t
->to_read_btrace
!= NULL
)
3938 return t
->to_read_btrace (t
, btrace
, btinfo
, type
);
3941 return BTRACE_ERR_NOT_SUPPORTED
;
3947 target_stop_recording (void)
3949 struct target_ops
*t
;
3951 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3952 if (t
->to_stop_recording
!= NULL
)
3954 t
->to_stop_recording (t
);
3958 /* This is optional. */
3964 target_info_record (void)
3966 struct target_ops
*t
;
3968 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3969 if (t
->to_info_record
!= NULL
)
3971 t
->to_info_record (t
);
3981 target_save_record (const char *filename
)
3983 struct target_ops
*t
;
3985 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3986 if (t
->to_save_record
!= NULL
)
3988 t
->to_save_record (t
, filename
);
3998 target_supports_delete_record (void)
4000 struct target_ops
*t
;
4002 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4003 if (t
->to_delete_record
!= NULL
)
4012 target_delete_record (void)
4014 struct target_ops
*t
;
4016 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4017 if (t
->to_delete_record
!= NULL
)
4019 t
->to_delete_record (t
);
4029 target_record_is_replaying (void)
4031 struct target_ops
*t
;
4033 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4034 if (t
->to_record_is_replaying
!= NULL
)
4035 return t
->to_record_is_replaying (t
);
4043 target_goto_record_begin (void)
4045 struct target_ops
*t
;
4047 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4048 if (t
->to_goto_record_begin
!= NULL
)
4050 t
->to_goto_record_begin (t
);
4060 target_goto_record_end (void)
4062 struct target_ops
*t
;
4064 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4065 if (t
->to_goto_record_end
!= NULL
)
4067 t
->to_goto_record_end (t
);
4077 target_goto_record (ULONGEST insn
)
4079 struct target_ops
*t
;
4081 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4082 if (t
->to_goto_record
!= NULL
)
4084 t
->to_goto_record (t
, insn
);
4094 target_insn_history (int size
, int flags
)
4096 struct target_ops
*t
;
4098 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4099 if (t
->to_insn_history
!= NULL
)
4101 t
->to_insn_history (t
, size
, flags
);
4111 target_insn_history_from (ULONGEST from
, int size
, int flags
)
4113 struct target_ops
*t
;
4115 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4116 if (t
->to_insn_history_from
!= NULL
)
4118 t
->to_insn_history_from (t
, from
, size
, flags
);
4128 target_insn_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
4130 struct target_ops
*t
;
4132 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4133 if (t
->to_insn_history_range
!= NULL
)
4135 t
->to_insn_history_range (t
, begin
, end
, flags
);
4145 target_call_history (int size
, int flags
)
4147 struct target_ops
*t
;
4149 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4150 if (t
->to_call_history
!= NULL
)
4152 t
->to_call_history (t
, size
, flags
);
4162 target_call_history_from (ULONGEST begin
, int size
, int flags
)
4164 struct target_ops
*t
;
4166 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4167 if (t
->to_call_history_from
!= NULL
)
4169 t
->to_call_history_from (t
, begin
, size
, flags
);
4179 target_call_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
4181 struct target_ops
*t
;
4183 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4184 if (t
->to_call_history_range
!= NULL
)
4186 t
->to_call_history_range (t
, begin
, end
, flags
);
4194 debug_to_prepare_to_store (struct target_ops
*self
, struct regcache
*regcache
)
4196 debug_target
.to_prepare_to_store (&debug_target
, regcache
);
4198 fprintf_unfiltered (gdb_stdlog
, "target_prepare_to_store ()\n");
4203 const struct frame_unwind
*
4204 target_get_unwinder (void)
4206 struct target_ops
*t
;
4208 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4209 if (t
->to_get_unwinder
!= NULL
)
4210 return t
->to_get_unwinder
;
4217 const struct frame_unwind
*
4218 target_get_tailcall_unwinder (void)
4220 struct target_ops
*t
;
4222 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4223 if (t
->to_get_tailcall_unwinder
!= NULL
)
4224 return t
->to_get_tailcall_unwinder
;
4232 forward_target_decr_pc_after_break (struct target_ops
*ops
,
4233 struct gdbarch
*gdbarch
)
4235 for (; ops
!= NULL
; ops
= ops
->beneath
)
4236 if (ops
->to_decr_pc_after_break
!= NULL
)
4237 return ops
->to_decr_pc_after_break (ops
, gdbarch
);
4239 return gdbarch_decr_pc_after_break (gdbarch
);
4245 target_decr_pc_after_break (struct gdbarch
*gdbarch
)
4247 return forward_target_decr_pc_after_break (current_target
.beneath
, gdbarch
);
4251 deprecated_debug_xfer_memory (CORE_ADDR memaddr
, bfd_byte
*myaddr
, int len
,
4252 int write
, struct mem_attrib
*attrib
,
4253 struct target_ops
*target
)
4257 retval
= debug_target
.deprecated_xfer_memory (memaddr
, myaddr
, len
, write
,
4260 fprintf_unfiltered (gdb_stdlog
,
4261 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4262 paddress (target_gdbarch (), memaddr
), len
,
4263 write
? "write" : "read", retval
);
4269 fputs_unfiltered (", bytes =", gdb_stdlog
);
4270 for (i
= 0; i
< retval
; i
++)
4272 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
4274 if (targetdebug
< 2 && i
> 0)
4276 fprintf_unfiltered (gdb_stdlog
, " ...");
4279 fprintf_unfiltered (gdb_stdlog
, "\n");
4282 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
4286 fputc_unfiltered ('\n', gdb_stdlog
);
4292 debug_to_files_info (struct target_ops
*target
)
4294 debug_target
.to_files_info (target
);
4296 fprintf_unfiltered (gdb_stdlog
, "target_files_info (xxx)\n");
4300 debug_to_insert_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
4301 struct bp_target_info
*bp_tgt
)
4305 retval
= debug_target
.to_insert_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
4307 fprintf_unfiltered (gdb_stdlog
,
4308 "target_insert_breakpoint (%s, xxx) = %ld\n",
4309 core_addr_to_string (bp_tgt
->placed_address
),
4310 (unsigned long) retval
);
4315 debug_to_remove_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
4316 struct bp_target_info
*bp_tgt
)
4320 retval
= debug_target
.to_remove_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
4322 fprintf_unfiltered (gdb_stdlog
,
4323 "target_remove_breakpoint (%s, xxx) = %ld\n",
4324 core_addr_to_string (bp_tgt
->placed_address
),
4325 (unsigned long) retval
);
4330 debug_to_can_use_hw_breakpoint (struct target_ops
*self
,
4331 int type
, int cnt
, int from_tty
)
4335 retval
= debug_target
.to_can_use_hw_breakpoint (&debug_target
,
4336 type
, cnt
, from_tty
);
4338 fprintf_unfiltered (gdb_stdlog
,
4339 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4340 (unsigned long) type
,
4341 (unsigned long) cnt
,
4342 (unsigned long) from_tty
,
4343 (unsigned long) retval
);
4348 debug_to_region_ok_for_hw_watchpoint (struct target_ops
*self
,
4349 CORE_ADDR addr
, int len
)
4353 retval
= debug_target
.to_region_ok_for_hw_watchpoint (&debug_target
,
4356 fprintf_unfiltered (gdb_stdlog
,
4357 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4358 core_addr_to_string (addr
), (unsigned long) len
,
4359 core_addr_to_string (retval
));
4364 debug_to_can_accel_watchpoint_condition (struct target_ops
*self
,
4365 CORE_ADDR addr
, int len
, int rw
,
4366 struct expression
*cond
)
4370 retval
= debug_target
.to_can_accel_watchpoint_condition (&debug_target
,
4374 fprintf_unfiltered (gdb_stdlog
,
4375 "target_can_accel_watchpoint_condition "
4376 "(%s, %d, %d, %s) = %ld\n",
4377 core_addr_to_string (addr
), len
, rw
,
4378 host_address_to_string (cond
), (unsigned long) retval
);
4383 debug_to_stopped_by_watchpoint (struct target_ops
*ops
)
4387 retval
= debug_target
.to_stopped_by_watchpoint (&debug_target
);
4389 fprintf_unfiltered (gdb_stdlog
,
4390 "target_stopped_by_watchpoint () = %ld\n",
4391 (unsigned long) retval
);
4396 debug_to_stopped_data_address (struct target_ops
*target
, CORE_ADDR
*addr
)
4400 retval
= debug_target
.to_stopped_data_address (target
, addr
);
4402 fprintf_unfiltered (gdb_stdlog
,
4403 "target_stopped_data_address ([%s]) = %ld\n",
4404 core_addr_to_string (*addr
),
4405 (unsigned long)retval
);
4410 debug_to_watchpoint_addr_within_range (struct target_ops
*target
,
4412 CORE_ADDR start
, int length
)
4416 retval
= debug_target
.to_watchpoint_addr_within_range (target
, addr
,
4419 fprintf_filtered (gdb_stdlog
,
4420 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4421 core_addr_to_string (addr
), core_addr_to_string (start
),
4427 debug_to_insert_hw_breakpoint (struct target_ops
*self
,
4428 struct gdbarch
*gdbarch
,
4429 struct bp_target_info
*bp_tgt
)
4433 retval
= debug_target
.to_insert_hw_breakpoint (&debug_target
,
4436 fprintf_unfiltered (gdb_stdlog
,
4437 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4438 core_addr_to_string (bp_tgt
->placed_address
),
4439 (unsigned long) retval
);
4444 debug_to_remove_hw_breakpoint (struct target_ops
*self
,
4445 struct gdbarch
*gdbarch
,
4446 struct bp_target_info
*bp_tgt
)
4450 retval
= debug_target
.to_remove_hw_breakpoint (&debug_target
,
4453 fprintf_unfiltered (gdb_stdlog
,
4454 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4455 core_addr_to_string (bp_tgt
->placed_address
),
4456 (unsigned long) retval
);
4461 debug_to_insert_watchpoint (struct target_ops
*self
,
4462 CORE_ADDR addr
, int len
, int type
,
4463 struct expression
*cond
)
4467 retval
= debug_target
.to_insert_watchpoint (&debug_target
,
4468 addr
, len
, type
, cond
);
4470 fprintf_unfiltered (gdb_stdlog
,
4471 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4472 core_addr_to_string (addr
), len
, type
,
4473 host_address_to_string (cond
), (unsigned long) retval
);
4478 debug_to_remove_watchpoint (struct target_ops
*self
,
4479 CORE_ADDR addr
, int len
, int type
,
4480 struct expression
*cond
)
4484 retval
= debug_target
.to_remove_watchpoint (&debug_target
,
4485 addr
, len
, type
, cond
);
4487 fprintf_unfiltered (gdb_stdlog
,
4488 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4489 core_addr_to_string (addr
), len
, type
,
4490 host_address_to_string (cond
), (unsigned long) retval
);
4495 debug_to_terminal_init (struct target_ops
*self
)
4497 debug_target
.to_terminal_init (&debug_target
);
4499 fprintf_unfiltered (gdb_stdlog
, "target_terminal_init ()\n");
4503 debug_to_terminal_inferior (struct target_ops
*self
)
4505 debug_target
.to_terminal_inferior (&debug_target
);
4507 fprintf_unfiltered (gdb_stdlog
, "target_terminal_inferior ()\n");
4511 debug_to_terminal_ours_for_output (struct target_ops
*self
)
4513 debug_target
.to_terminal_ours_for_output (&debug_target
);
4515 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours_for_output ()\n");
4519 debug_to_terminal_ours (struct target_ops
*self
)
4521 debug_target
.to_terminal_ours (&debug_target
);
4523 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours ()\n");
4527 debug_to_terminal_save_ours (struct target_ops
*self
)
4529 debug_target
.to_terminal_save_ours (&debug_target
);
4531 fprintf_unfiltered (gdb_stdlog
, "target_terminal_save_ours ()\n");
4535 debug_to_terminal_info (struct target_ops
*self
,
4536 const char *arg
, int from_tty
)
4538 debug_target
.to_terminal_info (&debug_target
, arg
, from_tty
);
4540 fprintf_unfiltered (gdb_stdlog
, "target_terminal_info (%s, %d)\n", arg
,
4545 debug_to_load (struct target_ops
*self
, char *args
, int from_tty
)
4547 debug_target
.to_load (&debug_target
, args
, from_tty
);
4549 fprintf_unfiltered (gdb_stdlog
, "target_load (%s, %d)\n", args
, from_tty
);
4553 debug_to_post_startup_inferior (struct target_ops
*self
, ptid_t ptid
)
4555 debug_target
.to_post_startup_inferior (&debug_target
, ptid
);
4557 fprintf_unfiltered (gdb_stdlog
, "target_post_startup_inferior (%d)\n",
4558 ptid_get_pid (ptid
));
4562 debug_to_insert_fork_catchpoint (struct target_ops
*self
, int pid
)
4566 retval
= debug_target
.to_insert_fork_catchpoint (&debug_target
, pid
);
4568 fprintf_unfiltered (gdb_stdlog
, "target_insert_fork_catchpoint (%d) = %d\n",
4575 debug_to_remove_fork_catchpoint (struct target_ops
*self
, int pid
)
4579 retval
= debug_target
.to_remove_fork_catchpoint (&debug_target
, pid
);
4581 fprintf_unfiltered (gdb_stdlog
, "target_remove_fork_catchpoint (%d) = %d\n",
4588 debug_to_insert_vfork_catchpoint (struct target_ops
*self
, int pid
)
4592 retval
= debug_target
.to_insert_vfork_catchpoint (&debug_target
, pid
);
4594 fprintf_unfiltered (gdb_stdlog
, "target_insert_vfork_catchpoint (%d) = %d\n",
4601 debug_to_remove_vfork_catchpoint (struct target_ops
*self
, int pid
)
4605 retval
= debug_target
.to_remove_vfork_catchpoint (&debug_target
, pid
);
4607 fprintf_unfiltered (gdb_stdlog
, "target_remove_vfork_catchpoint (%d) = %d\n",
4614 debug_to_insert_exec_catchpoint (struct target_ops
*self
, int pid
)
4618 retval
= debug_target
.to_insert_exec_catchpoint (&debug_target
, pid
);
4620 fprintf_unfiltered (gdb_stdlog
, "target_insert_exec_catchpoint (%d) = %d\n",
4627 debug_to_remove_exec_catchpoint (struct target_ops
*self
, int pid
)
4631 retval
= debug_target
.to_remove_exec_catchpoint (&debug_target
, pid
);
4633 fprintf_unfiltered (gdb_stdlog
, "target_remove_exec_catchpoint (%d) = %d\n",
4640 debug_to_has_exited (struct target_ops
*self
,
4641 int pid
, int wait_status
, int *exit_status
)
4645 has_exited
= debug_target
.to_has_exited (&debug_target
,
4646 pid
, wait_status
, exit_status
);
4648 fprintf_unfiltered (gdb_stdlog
, "target_has_exited (%d, %d, %d) = %d\n",
4649 pid
, wait_status
, *exit_status
, has_exited
);
4655 debug_to_can_run (struct target_ops
*self
)
4659 retval
= debug_target
.to_can_run (&debug_target
);
4661 fprintf_unfiltered (gdb_stdlog
, "target_can_run () = %d\n", retval
);
4666 static struct gdbarch
*
4667 debug_to_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
4669 struct gdbarch
*retval
;
4671 retval
= debug_target
.to_thread_architecture (ops
, ptid
);
4673 fprintf_unfiltered (gdb_stdlog
,
4674 "target_thread_architecture (%s) = %s [%s]\n",
4675 target_pid_to_str (ptid
),
4676 host_address_to_string (retval
),
4677 gdbarch_bfd_arch_info (retval
)->printable_name
);
4682 debug_to_stop (struct target_ops
*self
, ptid_t ptid
)
4684 debug_target
.to_stop (&debug_target
, ptid
);
4686 fprintf_unfiltered (gdb_stdlog
, "target_stop (%s)\n",
4687 target_pid_to_str (ptid
));
4691 debug_to_rcmd (struct target_ops
*self
, char *command
,
4692 struct ui_file
*outbuf
)
4694 debug_target
.to_rcmd (&debug_target
, command
, outbuf
);
4695 fprintf_unfiltered (gdb_stdlog
, "target_rcmd (%s, ...)\n", command
);
4699 debug_to_pid_to_exec_file (struct target_ops
*self
, int pid
)
4703 exec_file
= debug_target
.to_pid_to_exec_file (&debug_target
, pid
);
4705 fprintf_unfiltered (gdb_stdlog
, "target_pid_to_exec_file (%d) = %s\n",
4712 setup_target_debug (void)
4714 memcpy (&debug_target
, ¤t_target
, sizeof debug_target
);
4716 current_target
.to_open
= debug_to_open
;
4717 current_target
.to_post_attach
= debug_to_post_attach
;
4718 current_target
.to_prepare_to_store
= debug_to_prepare_to_store
;
4719 current_target
.deprecated_xfer_memory
= deprecated_debug_xfer_memory
;
4720 current_target
.to_files_info
= debug_to_files_info
;
4721 current_target
.to_insert_breakpoint
= debug_to_insert_breakpoint
;
4722 current_target
.to_remove_breakpoint
= debug_to_remove_breakpoint
;
4723 current_target
.to_can_use_hw_breakpoint
= debug_to_can_use_hw_breakpoint
;
4724 current_target
.to_insert_hw_breakpoint
= debug_to_insert_hw_breakpoint
;
4725 current_target
.to_remove_hw_breakpoint
= debug_to_remove_hw_breakpoint
;
4726 current_target
.to_insert_watchpoint
= debug_to_insert_watchpoint
;
4727 current_target
.to_remove_watchpoint
= debug_to_remove_watchpoint
;
4728 current_target
.to_stopped_by_watchpoint
= debug_to_stopped_by_watchpoint
;
4729 current_target
.to_stopped_data_address
= debug_to_stopped_data_address
;
4730 current_target
.to_watchpoint_addr_within_range
4731 = debug_to_watchpoint_addr_within_range
;
4732 current_target
.to_region_ok_for_hw_watchpoint
4733 = debug_to_region_ok_for_hw_watchpoint
;
4734 current_target
.to_can_accel_watchpoint_condition
4735 = debug_to_can_accel_watchpoint_condition
;
4736 current_target
.to_terminal_init
= debug_to_terminal_init
;
4737 current_target
.to_terminal_inferior
= debug_to_terminal_inferior
;
4738 current_target
.to_terminal_ours_for_output
4739 = debug_to_terminal_ours_for_output
;
4740 current_target
.to_terminal_ours
= debug_to_terminal_ours
;
4741 current_target
.to_terminal_save_ours
= debug_to_terminal_save_ours
;
4742 current_target
.to_terminal_info
= debug_to_terminal_info
;
4743 current_target
.to_load
= debug_to_load
;
4744 current_target
.to_post_startup_inferior
= debug_to_post_startup_inferior
;
4745 current_target
.to_insert_fork_catchpoint
= debug_to_insert_fork_catchpoint
;
4746 current_target
.to_remove_fork_catchpoint
= debug_to_remove_fork_catchpoint
;
4747 current_target
.to_insert_vfork_catchpoint
= debug_to_insert_vfork_catchpoint
;
4748 current_target
.to_remove_vfork_catchpoint
= debug_to_remove_vfork_catchpoint
;
4749 current_target
.to_insert_exec_catchpoint
= debug_to_insert_exec_catchpoint
;
4750 current_target
.to_remove_exec_catchpoint
= debug_to_remove_exec_catchpoint
;
4751 current_target
.to_has_exited
= debug_to_has_exited
;
4752 current_target
.to_can_run
= debug_to_can_run
;
4753 current_target
.to_stop
= debug_to_stop
;
4754 current_target
.to_rcmd
= debug_to_rcmd
;
4755 current_target
.to_pid_to_exec_file
= debug_to_pid_to_exec_file
;
4756 current_target
.to_thread_architecture
= debug_to_thread_architecture
;
/* Help text for the "info target" / "info files" commands.  */
static char targ_desc[] =
  "Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4766 default_rcmd (struct target_ops
*self
, char *command
, struct ui_file
*output
)
4768 error (_("\"monitor\" command not supported by this target."));
4772 do_monitor_command (char *cmd
,
4775 target_rcmd (cmd
, gdb_stdtarg
);
4778 /* Print the name of each layers of our target stack. */
4781 maintenance_print_target_stack (char *cmd
, int from_tty
)
4783 struct target_ops
*t
;
4785 printf_filtered (_("The current target stack is:\n"));
4787 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
4789 printf_filtered (" - %s (%s)\n", t
->to_shortname
, t
->to_longname
);
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated.  */
static int target_async_permitted_1 = 0;

/* "set target-async" callback: refuse to change the setting while an
   inferior is live, restoring the user-visible value on error.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Roll the staging variable back so "show" stays truthful.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}

/* "show target-async" callback.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4823 /* Temporary copies of permission settings. */
4825 static int may_write_registers_1
= 1;
4826 static int may_write_memory_1
= 1;
4827 static int may_insert_breakpoints_1
= 1;
4828 static int may_insert_tracepoints_1
= 1;
4829 static int may_insert_fast_tracepoints_1
= 1;
4830 static int may_stop_1
= 1;
4832 /* Make the user-set values match the real values again. */
4835 update_target_permissions (void)
4837 may_write_registers_1
= may_write_registers
;
4838 may_write_memory_1
= may_write_memory
;
4839 may_insert_breakpoints_1
= may_insert_breakpoints
;
4840 may_insert_tracepoints_1
= may_insert_tracepoints
;
4841 may_insert_fast_tracepoints_1
= may_insert_fast_tracepoints
;
4842 may_stop_1
= may_stop
;
4845 /* The one function handles (most of) the permission flags in the same
4849 set_target_permissions (char *args
, int from_tty
,
4850 struct cmd_list_element
*c
)
4852 if (target_has_execution
)
4854 update_target_permissions ();
4855 error (_("Cannot change this setting while the inferior is running."));
4858 /* Make the real values match the user-changed values. */
4859 may_write_registers
= may_write_registers_1
;
4860 may_insert_breakpoints
= may_insert_breakpoints_1
;
4861 may_insert_tracepoints
= may_insert_tracepoints_1
;
4862 may_insert_fast_tracepoints
= may_insert_fast_tracepoints_1
;
4863 may_stop
= may_stop_1
;
4864 update_observer_mode ();
4867 /* Set memory write permission independently of observer mode. */
4870 set_write_memory_permission (char *args
, int from_tty
,
4871 struct cmd_list_element
*c
)
4873 /* Make the real values match the user-changed values. */
4874 may_write_memory
= may_write_memory_1
;
4875 update_observer_mode ();
4880 initialize_targets (void)
4882 init_dummy_target ();
4883 push_target (&dummy_target
);
4885 add_info ("target", target_info
, targ_desc
);
4886 add_info ("files", target_info
, targ_desc
);
4888 add_setshow_zuinteger_cmd ("target", class_maintenance
, &targetdebug
, _("\
4889 Set target debugging."), _("\
4890 Show target debugging."), _("\
4891 When non-zero, target debugging is enabled. Higher numbers are more\n\
4892 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
4896 &setdebuglist
, &showdebuglist
);
4898 add_setshow_boolean_cmd ("trust-readonly-sections", class_support
,
4899 &trust_readonly
, _("\
4900 Set mode for reading from readonly sections."), _("\
4901 Show mode for reading from readonly sections."), _("\
4902 When this mode is on, memory reads from readonly sections (such as .text)\n\
4903 will be read from the object file instead of from the target. This will\n\
4904 result in significant performance improvement for remote targets."),
4906 show_trust_readonly
,
4907 &setlist
, &showlist
);
4909 add_com ("monitor", class_obscure
, do_monitor_command
,
4910 _("Send a command to the remote monitor (remote targets only)."));
4912 add_cmd ("target-stack", class_maintenance
, maintenance_print_target_stack
,
4913 _("Print the name of each layer of the internal target stack."),
4914 &maintenanceprintlist
);
4916 add_setshow_boolean_cmd ("target-async", no_class
,
4917 &target_async_permitted_1
, _("\
4918 Set whether gdb controls the inferior in asynchronous mode."), _("\
4919 Show whether gdb controls the inferior in asynchronous mode."), _("\
4920 Tells gdb whether to control the inferior in asynchronous mode."),
4921 set_target_async_command
,
4922 show_target_async_command
,
4926 add_setshow_boolean_cmd ("may-write-registers", class_support
,
4927 &may_write_registers_1
, _("\
4928 Set permission to write into registers."), _("\
4929 Show permission to write into registers."), _("\
4930 When this permission is on, GDB may write into the target's registers.\n\
4931 Otherwise, any sort of write attempt will result in an error."),
4932 set_target_permissions
, NULL
,
4933 &setlist
, &showlist
);
4935 add_setshow_boolean_cmd ("may-write-memory", class_support
,
4936 &may_write_memory_1
, _("\
4937 Set permission to write into target memory."), _("\
4938 Show permission to write into target memory."), _("\
4939 When this permission is on, GDB may write into the target's memory.\n\
4940 Otherwise, any sort of write attempt will result in an error."),
4941 set_write_memory_permission
, NULL
,
4942 &setlist
, &showlist
);
4944 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support
,
4945 &may_insert_breakpoints_1
, _("\
4946 Set permission to insert breakpoints in the target."), _("\
4947 Show permission to insert breakpoints in the target."), _("\
4948 When this permission is on, GDB may insert breakpoints in the program.\n\
4949 Otherwise, any sort of insertion attempt will result in an error."),
4950 set_target_permissions
, NULL
,
4951 &setlist
, &showlist
);
4953 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support
,
4954 &may_insert_tracepoints_1
, _("\
4955 Set permission to insert tracepoints in the target."), _("\
4956 Show permission to insert tracepoints in the target."), _("\
4957 When this permission is on, GDB may insert tracepoints in the program.\n\
4958 Otherwise, any sort of insertion attempt will result in an error."),
4959 set_target_permissions
, NULL
,
4960 &setlist
, &showlist
);
4962 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support
,
4963 &may_insert_fast_tracepoints_1
, _("\
4964 Set permission to insert fast tracepoints in the target."), _("\
4965 Show permission to insert fast tracepoints in the target."), _("\
4966 When this permission is on, GDB may insert fast tracepoints.\n\
4967 Otherwise, any sort of insertion attempt will result in an error."),
4968 set_target_permissions
, NULL
,
4969 &setlist
, &showlist
);
4971 add_setshow_boolean_cmd ("may-interrupt", class_support
,
4973 Set permission to interrupt or signal the target."), _("\
4974 Show permission to interrupt or signal the target."), _("\
4975 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4976 Otherwise, any attempt to interrupt or stop will be ignored."),
4977 set_target_permissions
, NULL
,
4978 &setlist
, &showlist
);