1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
59
60 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
61 long lwp, long tid);
62
63 static void tcomplain (void) ATTRIBUTE_NORETURN;
64
65 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
66
67 static int return_zero (void);
68
69 void target_ignore (void);
70
71 static void target_command (char *, int);
72
73 static struct target_ops *find_default_run_target (char *);
74
75 static target_xfer_partial_ftype default_xfer_partial;
76
77 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
78 ptid_t ptid);
79
80 static int dummy_find_memory_regions (struct target_ops *self,
81 find_memory_region_ftype ignore1,
82 void *ignore2);
83
84 static char *dummy_make_corefile_notes (struct target_ops *self,
85 bfd *ignore1, int *ignore2);
86
87 static int find_default_can_async_p (struct target_ops *ignore);
88
89 static int find_default_is_async_p (struct target_ops *ignore);
90
91 static enum exec_direction_kind default_execution_direction
92 (struct target_ops *self);
93
94 #include "target-delegates.c"
95
96 static void init_dummy_target (void);
97
98 static struct target_ops debug_target;
99
100 static void debug_to_open (char *, int);
101
102 static void debug_to_prepare_to_store (struct target_ops *self,
103 struct regcache *);
104
105 static void debug_to_files_info (struct target_ops *);
106
107 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
108 struct bp_target_info *);
109
110 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
111 struct bp_target_info *);
112
113 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
114 int, int, int);
115
116 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
117 struct gdbarch *,
118 struct bp_target_info *);
119
120 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
121 struct gdbarch *,
122 struct bp_target_info *);
123
124 static int debug_to_insert_watchpoint (struct target_ops *self,
125 CORE_ADDR, int, int,
126 struct expression *);
127
128 static int debug_to_remove_watchpoint (struct target_ops *self,
129 CORE_ADDR, int, int,
130 struct expression *);
131
132 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
133
134 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
135 CORE_ADDR, CORE_ADDR, int);
136
137 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
138 CORE_ADDR, int);
139
140 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
141 CORE_ADDR, int, int,
142 struct expression *);
143
144 static void debug_to_terminal_init (struct target_ops *self);
145
146 static void debug_to_terminal_inferior (struct target_ops *self);
147
148 static void debug_to_terminal_ours_for_output (struct target_ops *self);
149
150 static void debug_to_terminal_save_ours (struct target_ops *self);
151
152 static void debug_to_terminal_ours (struct target_ops *self);
153
154 static void debug_to_load (struct target_ops *self, char *, int);
155
156 static int debug_to_can_run (struct target_ops *self);
157
158 static void debug_to_stop (struct target_ops *self, ptid_t);
159
160 /* Pointer to array of target architecture structures; the number of
161 entries currently in use (also the next free index); and the
162 allocated size of the array. */
163 struct target_ops **target_structs;
164 unsigned target_struct_size;
165 unsigned target_struct_allocsize;
166 #define DEFAULT_ALLOCSIZE 10
167
168 /* The initial current target, so that there is always a semi-valid
169 current target. */
170
171 static struct target_ops dummy_target;
172
173 /* Top of target stack. */
174
175 static struct target_ops *target_stack;
176
177 /* The target structure we are currently using to talk to a process
178 or file or whatever "inferior" we have. */
179
180 struct target_ops current_target;
181
182 /* Command list for target. */
183
184 static struct cmd_list_element *targetlist = NULL;
185
186 /* Nonzero if we should trust readonly sections from the
187 executable when reading memory. */
188
189 static int trust_readonly = 0;
190
191 /* Nonzero if we should show true memory content including
192 memory breakpoints inserted by GDB. */
193
194 static int show_memory_breakpoints = 0;
195
196 /* These globals control whether GDB attempts to perform these
197 operations; they are useful for targets that need to prevent
198 inadvertent disruption, such as in non-stop mode. */
199
200 int may_write_registers = 1;
201
202 int may_write_memory = 1;
203
204 int may_insert_breakpoints = 1;
205
206 int may_insert_tracepoints = 1;
207
208 int may_insert_fast_tracepoints = 1;
209
210 int may_stop = 1;
211
212 /* Nonzero if we want to see tracing of target-level operations. */
213
214 static unsigned int targetdebug = 0;
215 static void
216 show_targetdebug (struct ui_file *file, int from_tty,
217 struct cmd_list_element *c, const char *value)
218 {
219 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
220 }
221
222 static void setup_target_debug (void);
223
224 /* The user just typed 'target' without the name of a target. */
225
226 static void
227 target_command (char *arg, int from_tty)
228 {
229 fputs_filtered ("Argument required (target name). Try `help target'\n",
230 gdb_stdout);
231 }
232
233 /* Default target_has_* methods for process_stratum targets. */
234
235 int
236 default_child_has_all_memory (struct target_ops *ops)
237 {
238 /* If no inferior selected, then we can't read memory here. */
239 if (ptid_equal (inferior_ptid, null_ptid))
240 return 0;
241
242 return 1;
243 }
244
245 int
246 default_child_has_memory (struct target_ops *ops)
247 {
248 /* If no inferior selected, then we can't read memory here. */
249 if (ptid_equal (inferior_ptid, null_ptid))
250 return 0;
251
252 return 1;
253 }
254
255 int
256 default_child_has_stack (struct target_ops *ops)
257 {
258 /* If no inferior selected, there's no stack. */
259 if (ptid_equal (inferior_ptid, null_ptid))
260 return 0;
261
262 return 1;
263 }
264
265 int
266 default_child_has_registers (struct target_ops *ops)
267 {
268 /* Can't read registers from no inferior. */
269 if (ptid_equal (inferior_ptid, null_ptid))
270 return 0;
271
272 return 1;
273 }
274
275 int
276 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
277 {
278 /* If there's no thread selected, then we can't make it run through
279 hoops. */
280 if (ptid_equal (the_ptid, null_ptid))
281 return 0;
282
283 return 1;
284 }
285
286
287 int
288 target_has_all_memory_1 (void)
289 {
290 struct target_ops *t;
291
292 for (t = current_target.beneath; t != NULL; t = t->beneath)
293 if (t->to_has_all_memory (t))
294 return 1;
295
296 return 0;
297 }
298
299 int
300 target_has_memory_1 (void)
301 {
302 struct target_ops *t;
303
304 for (t = current_target.beneath; t != NULL; t = t->beneath)
305 if (t->to_has_memory (t))
306 return 1;
307
308 return 0;
309 }
310
311 int
312 target_has_stack_1 (void)
313 {
314 struct target_ops *t;
315
316 for (t = current_target.beneath; t != NULL; t = t->beneath)
317 if (t->to_has_stack (t))
318 return 1;
319
320 return 0;
321 }
322
323 int
324 target_has_registers_1 (void)
325 {
326 struct target_ops *t;
327
328 for (t = current_target.beneath; t != NULL; t = t->beneath)
329 if (t->to_has_registers (t))
330 return 1;
331
332 return 0;
333 }
334
335 int
336 target_has_execution_1 (ptid_t the_ptid)
337 {
338 struct target_ops *t;
339
340 for (t = current_target.beneath; t != NULL; t = t->beneath)
341 if (t->to_has_execution (t, the_ptid))
342 return 1;
343
344 return 0;
345 }
346
347 int
348 target_has_execution_current (void)
349 {
350 return target_has_execution_1 (inferior_ptid);
351 }
352
353 /* Complete initialization of T. This ensures that various fields in
354 T are set, if needed by the target implementation. */
355
356 void
357 complete_target_initialization (struct target_ops *t)
358 {
359 /* Provide default values for all "must have" methods. */
360 if (t->to_xfer_partial == NULL)
361 t->to_xfer_partial = default_xfer_partial;
362
363 if (t->to_has_all_memory == NULL)
364 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
365
366 if (t->to_has_memory == NULL)
367 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
368
369 if (t->to_has_stack == NULL)
370 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
371
372 if (t->to_has_registers == NULL)
373 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
374
375 if (t->to_has_execution == NULL)
376 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
377
378 install_delegators (t);
379 }
380
381 /* Add possible target architecture T to the list and add a new
382 command 'target T->to_shortname'. Set COMPLETER as the command's
383 completer if not NULL. */
384
385 void
386 add_target_with_completer (struct target_ops *t,
387 completer_ftype *completer)
388 {
389 struct cmd_list_element *c;
390
391 complete_target_initialization (t);
392
393 if (!target_structs)
394 {
395 target_struct_allocsize = DEFAULT_ALLOCSIZE;
396 target_structs = (struct target_ops **) xmalloc
397 (target_struct_allocsize * sizeof (*target_structs));
398 }
399 if (target_struct_size >= target_struct_allocsize)
400 {
401 target_struct_allocsize *= 2;
402 target_structs = (struct target_ops **)
403 xrealloc ((char *) target_structs,
404 target_struct_allocsize * sizeof (*target_structs));
405 }
406 target_structs[target_struct_size++] = t;
407
408 if (targetlist == NULL)
409 add_prefix_cmd ("target", class_run, target_command, _("\
410 Connect to a target machine or process.\n\
411 The first argument is the type or protocol of the target machine.\n\
412 Remaining arguments are interpreted by the target protocol. For more\n\
413 information on the arguments for a particular protocol, type\n\
414 `help target ' followed by the protocol name."),
415 &targetlist, "target ", 0, &cmdlist);
416 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
417 &targetlist);
418 if (completer != NULL)
419 set_cmd_completer (c, completer);
420 }
421
422 /* Add a possible target architecture to the list. */
423
424 void
425 add_target (struct target_ops *t)
426 {
427 add_target_with_completer (t, NULL);
428 }
429
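/* A minimal sketch of how a backend might register itself: fill in a
   file-scope target_ops, set the identification fields and OPS_MAGIC,
   and call add_target from an _initialize_* routine.  The "example_*"
   names are hypothetical, invented purely for illustration.  */

#if 0	/* Illustrative sketch; not compiled.  */

static struct target_ops example_target;

static void
example_open (char *args, int from_tty)
{
  /* A real backend would parse ARGS, establish its connection, and
     then call push_target (&example_target).  */
}

static void
example_close (struct target_ops *self)
{
  /* Release whatever example_open acquired.  */
}

void
_initialize_example_target (void)
{
  example_target.to_shortname = "example";
  example_target.to_longname = "Example target backend";
  example_target.to_doc = "Sketch of target registration.";
  example_target.to_open = example_open;
  example_target.to_close = example_close;
  example_target.to_stratum = process_stratum;
  example_target.to_magic = OPS_MAGIC;

  /* add_target calls complete_target_initialization, which supplies
     safe defaults for the remaining "must have" methods.  */
  add_target (&example_target);
}

#endif
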
430 /* See target.h. */
431
432 void
433 add_deprecated_target_alias (struct target_ops *t, char *alias)
434 {
435 struct cmd_list_element *c;
436 char *alt;
437
438 /* If we used add_alias_cmd here, we would not get the deprecated warning;
439 see PR cli/15104. */
440 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
441 alt = xstrprintf ("target %s", t->to_shortname);
442 deprecate_cmd (c, alt);
443 }
444
445 /* Stub functions */
446
447 void
448 target_ignore (void)
449 {
450 }
451
452 void
453 target_kill (void)
454 {
455 struct target_ops *t;
456
457 for (t = current_target.beneath; t != NULL; t = t->beneath)
458 if (t->to_kill != NULL)
459 {
460 if (targetdebug)
461 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
462
463 t->to_kill (t);
464 return;
465 }
466
467 noprocess ();
468 }
469
470 void
471 target_load (char *arg, int from_tty)
472 {
473 target_dcache_invalidate ();
474 (*current_target.to_load) (&current_target, arg, from_tty);
475 }
476
477 void
478 target_create_inferior (char *exec_file, char *args,
479 char **env, int from_tty)
480 {
481 struct target_ops *t;
482
483 for (t = current_target.beneath; t != NULL; t = t->beneath)
484 {
485 if (t->to_create_inferior != NULL)
486 {
487 t->to_create_inferior (t, exec_file, args, env, from_tty);
488 if (targetdebug)
489 fprintf_unfiltered (gdb_stdlog,
490 "target_create_inferior (%s, %s, xxx, %d)\n",
491 exec_file, args, from_tty);
492 return;
493 }
494 }
495
496 internal_error (__FILE__, __LINE__,
497 _("could not find a target to create inferior"));
498 }
499
500 void
501 target_terminal_inferior (void)
502 {
503 /* A background resume (``run&'') should leave GDB in control of the
504 terminal. Use target_can_async_p, not target_is_async_p, since at
505 this point the target is not async yet. However, if sync_execution
506 is not set, we know it will become async prior to resume. */
507 if (target_can_async_p () && !sync_execution)
508 return;
509
510 /* If GDB is resuming the inferior in the foreground, install
511 inferior's terminal modes. */
512 (*current_target.to_terminal_inferior) (&current_target);
513 }
514
515 static int
516 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
517 struct target_ops *t)
518 {
519 errno = EIO; /* Can't read/write this location. */
520 return 0; /* No bytes handled. */
521 }
522
523 static void
524 tcomplain (void)
525 {
526 error (_("You can't do that when your target is `%s'"),
527 current_target.to_shortname);
528 }
529
530 void
531 noprocess (void)
532 {
533 error (_("You can't do that without a process to debug."));
534 }
535
536 static void
537 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
538 {
539 printf_unfiltered (_("No saved terminal information.\n"));
540 }
541
542 /* A default implementation for the to_get_ada_task_ptid target method.
543
544 This function builds the PTID by using both LWP and TID as part of
545 the PTID lwp and tid elements. The pid used is the pid of the
546 inferior_ptid. */
547
548 static ptid_t
549 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
550 {
551 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
552 }
553
554 static enum exec_direction_kind
555 default_execution_direction (struct target_ops *self)
556 {
557 if (!target_can_execute_reverse)
558 return EXEC_FORWARD;
559 else if (!target_can_async_p ())
560 return EXEC_FORWARD;
561 else
562 gdb_assert_not_reached ("\
563 to_execution_direction must be implemented for reverse async");
564 }
565
566 /* Go through the target stack from top to bottom, copying over zero
567 entries in current_target, then filling in still empty entries. In
568 effect, we are doing class inheritance through the pushed target
569 vectors.
570
571 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
572 is currently implemented, is that it discards any knowledge of
573 which target an inherited method originally belonged to.
574 Consequently, new target methods should instead explicitly and
575 locally search the target stack for the target that can handle the
576 request. */
577
578 static void
579 update_current_target (void)
580 {
581 struct target_ops *t;
582
583 /* First, reset current's contents. */
584 memset (&current_target, 0, sizeof (current_target));
585
586 /* Install the delegators. */
587 install_delegators (&current_target);
588
589 #define INHERIT(FIELD, TARGET) \
590 if (!current_target.FIELD) \
591 current_target.FIELD = (TARGET)->FIELD
592
593 for (t = target_stack; t; t = t->beneath)
594 {
595 INHERIT (to_shortname, t);
596 INHERIT (to_longname, t);
597 INHERIT (to_doc, t);
598 /* Do not inherit to_open. */
599 /* Do not inherit to_close. */
600 /* Do not inherit to_attach. */
601 /* Do not inherit to_post_attach. */
602 INHERIT (to_attach_no_wait, t);
603 /* Do not inherit to_detach. */
604 /* Do not inherit to_disconnect. */
605 /* Do not inherit to_resume. */
606 /* Do not inherit to_wait. */
607 /* Do not inherit to_fetch_registers. */
608 /* Do not inherit to_store_registers. */
609 /* Do not inherit to_prepare_to_store. */
610 INHERIT (deprecated_xfer_memory, t);
611 /* Do not inherit to_files_info. */
612 /* Do not inherit to_insert_breakpoint. */
613 /* Do not inherit to_remove_breakpoint. */
614 /* Do not inherit to_can_use_hw_breakpoint. */
615 /* Do not inherit to_insert_hw_breakpoint. */
616 /* Do not inherit to_remove_hw_breakpoint. */
617 /* Do not inherit to_ranged_break_num_registers. */
618 /* Do not inherit to_insert_watchpoint. */
619 /* Do not inherit to_remove_watchpoint. */
620 /* Do not inherit to_insert_mask_watchpoint. */
621 /* Do not inherit to_remove_mask_watchpoint. */
622 /* Do not inherit to_stopped_data_address. */
623 INHERIT (to_have_steppable_watchpoint, t);
624 INHERIT (to_have_continuable_watchpoint, t);
625 /* Do not inherit to_stopped_by_watchpoint. */
626 /* Do not inherit to_watchpoint_addr_within_range. */
627 /* Do not inherit to_region_ok_for_hw_watchpoint. */
628 /* Do not inherit to_can_accel_watchpoint_condition. */
629 /* Do not inherit to_masked_watch_num_registers. */
630 /* Do not inherit to_terminal_init. */
631 /* Do not inherit to_terminal_inferior. */
632 /* Do not inherit to_terminal_ours_for_output. */
633 /* Do not inherit to_terminal_ours. */
634 /* Do not inherit to_terminal_save_ours. */
635 /* Do not inherit to_terminal_info. */
636 /* Do not inherit to_kill. */
637 /* Do not inherit to_load. */
638 /* Do not inherit to_create_inferior. */
639 /* Do not inherit to_post_startup_inferior. */
640 /* Do not inherit to_insert_fork_catchpoint. */
641 /* Do not inherit to_remove_fork_catchpoint. */
642 /* Do not inherit to_insert_vfork_catchpoint. */
643 /* Do not inherit to_remove_vfork_catchpoint. */
644 /* Do not inherit to_follow_fork. */
645 /* Do not inherit to_insert_exec_catchpoint. */
646 /* Do not inherit to_remove_exec_catchpoint. */
647 /* Do not inherit to_set_syscall_catchpoint. */
648 /* Do not inherit to_has_exited. */
649 /* Do not inherit to_mourn_inferior. */
650 INHERIT (to_can_run, t);
651 /* Do not inherit to_pass_signals. */
652 /* Do not inherit to_program_signals. */
653 /* Do not inherit to_thread_alive. */
654 /* Do not inherit to_find_new_threads. */
655 /* Do not inherit to_pid_to_str. */
656 /* Do not inherit to_extra_thread_info. */
657 /* Do not inherit to_thread_name. */
658 INHERIT (to_stop, t);
659 /* Do not inherit to_xfer_partial. */
660 /* Do not inherit to_rcmd. */
661 /* Do not inherit to_pid_to_exec_file. */
662 /* Do not inherit to_log_command. */
663 INHERIT (to_stratum, t);
664 /* Do not inherit to_has_all_memory. */
665 /* Do not inherit to_has_memory. */
666 /* Do not inherit to_has_stack. */
667 /* Do not inherit to_has_registers. */
668 /* Do not inherit to_has_execution. */
669 INHERIT (to_has_thread_control, t);
670 /* Do not inherit to_can_async_p. */
671 /* Do not inherit to_is_async_p. */
672 /* Do not inherit to_async. */
673 /* Do not inherit to_find_memory_regions. */
674 /* Do not inherit to_make_corefile_notes. */
675 /* Do not inherit to_get_bookmark. */
676 /* Do not inherit to_goto_bookmark. */
677 /* Do not inherit to_get_thread_local_address. */
678 /* Do not inherit to_can_execute_reverse. */
679 /* Do not inherit to_execution_direction. */
680 /* Do not inherit to_thread_architecture. */
681 /* Do not inherit to_read_description. */
682 /* Do not inherit to_get_ada_task_ptid. */
683 /* Do not inherit to_search_memory. */
684 /* Do not inherit to_supports_multi_process. */
685 /* Do not inherit to_supports_enable_disable_tracepoint. */
686 /* Do not inherit to_supports_string_tracing. */
687 /* Do not inherit to_trace_init. */
688 /* Do not inherit to_download_tracepoint. */
689 /* Do not inherit to_can_download_tracepoint. */
690 /* Do not inherit to_download_trace_state_variable. */
691 /* Do not inherit to_enable_tracepoint. */
692 /* Do not inherit to_disable_tracepoint. */
693 /* Do not inherit to_trace_set_readonly_regions. */
694 /* Do not inherit to_trace_start. */
695 /* Do not inherit to_get_trace_status. */
696 /* Do not inherit to_get_tracepoint_status. */
697 /* Do not inherit to_trace_stop. */
698 /* Do not inherit to_trace_find. */
699 /* Do not inherit to_get_trace_state_variable_value. */
700 /* Do not inherit to_save_trace_data. */
701 /* Do not inherit to_upload_tracepoints. */
702 /* Do not inherit to_upload_trace_state_variables. */
703 /* Do not inherit to_get_raw_trace_data. */
704 /* Do not inherit to_get_min_fast_tracepoint_insn_len. */
705 /* Do not inherit to_set_disconnected_tracing. */
706 /* Do not inherit to_set_circular_trace_buffer. */
707 /* Do not inherit to_set_trace_buffer_size. */
708 /* Do not inherit to_set_trace_notes. */
709 /* Do not inherit to_get_tib_address. */
710 /* Do not inherit to_set_permissions. */
711 /* Do not inherit to_static_tracepoint_marker_at. */
712 /* Do not inherit to_static_tracepoint_markers_by_strid. */
713 /* Do not inherit to_traceframe_info. */
714 /* Do not inherit to_use_agent. */
715 /* Do not inherit to_can_use_agent. */
716 /* Do not inherit to_augmented_libraries_svr4_read. */
717 INHERIT (to_magic, t);
718 /* Do not inherit
719 to_supports_evaluation_of_breakpoint_conditions. */
720 /* Do not inherit to_can_run_breakpoint_commands. */
721 /* Do not inherit to_memory_map. */
722 /* Do not inherit to_flash_erase. */
723 /* Do not inherit to_flash_done. */
724 }
725 #undef INHERIT
726
727 /* Clean up a target struct so it no longer has any zero pointers in
728 it. Some entries are defaulted to a method that prints an error,
729 others are hard-wired to a standard recursive default. */
730
731 #define de_fault(field, value) \
732 if (!current_target.field) \
733 current_target.field = value
734
735 de_fault (to_open,
736 (void (*) (char *, int))
737 tcomplain);
738 de_fault (to_close,
739 (void (*) (struct target_ops *))
740 target_ignore);
741 de_fault (deprecated_xfer_memory,
742 (int (*) (CORE_ADDR, gdb_byte *, int, int,
743 struct mem_attrib *, struct target_ops *))
744 nomemory);
745 de_fault (to_can_run,
746 (int (*) (struct target_ops *))
747 return_zero);
748 de_fault (to_stop,
749 (void (*) (struct target_ops *, ptid_t))
750 target_ignore);
751 current_target.to_read_description = NULL;
752
753 #undef de_fault
754
755 /* Finally, position the target-stack beneath the squashed
756 "current_target". That way code looking for a non-inherited
757 target method can quickly and simply find it. */
758 current_target.beneath = target_stack;
759
760 if (targetdebug)
761 setup_target_debug ();
762 }
763
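/* For reference, a single INHERIT line above expands to roughly the
   following, shown here for to_can_run inside a purely illustrative
   wrapper function.  */

#if 0	/* Illustrative sketch; not compiled.  */

static void
example_inherit_to_can_run (struct target_ops *t)
{
  if (!current_target.to_can_run)
    current_target.to_can_run = t->to_can_run;
}

#endif
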
764 /* Push a new target type into the stack of the existing target accessors,
765 possibly superseding some of the existing accessors.
766
767 Rather than allow an empty stack, we always have the dummy target at
768 the bottom stratum, so we can call the function vectors without
769 checking them. */
770
771 void
772 push_target (struct target_ops *t)
773 {
774 struct target_ops **cur;
775
776 /* Check magic number. If wrong, it probably means someone changed
777 the struct definition, but not all the places that initialize one. */
778 if (t->to_magic != OPS_MAGIC)
779 {
780 fprintf_unfiltered (gdb_stderr,
781 "Magic number of %s target struct wrong\n",
782 t->to_shortname);
783 internal_error (__FILE__, __LINE__,
784 _("failed internal consistency check"));
785 }
786
787 /* Find the proper stratum to install this target in. */
788 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
789 {
790 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
791 break;
792 }
793
794 /* If there are already targets at this stratum, remove them. */
795 /* FIXME: cagney/2003-10-15: I think this should be popping all
796 targets to CUR, and not just those at this stratum level. */
797 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
798 {
799 /* There's already something at this stratum level. Close it,
800 and un-hook it from the stack. */
801 struct target_ops *tmp = (*cur);
802
803 (*cur) = (*cur)->beneath;
804 tmp->beneath = NULL;
805 target_close (tmp);
806 }
807
808 /* We have removed all targets in our stratum, now add the new one. */
809 t->beneath = (*cur);
810 (*cur) = t;
811
812 update_current_target ();
813 }
814
815 /* Remove a target_ops vector from the stack, wherever it may be.
816 Return how many times it was removed (0 or 1). */
817
818 int
819 unpush_target (struct target_ops *t)
820 {
821 struct target_ops **cur;
822 struct target_ops *tmp;
823
824 if (t->to_stratum == dummy_stratum)
825 internal_error (__FILE__, __LINE__,
826 _("Attempt to unpush the dummy target"));
827
828 /* Look for the specified target. Note that we assume that a target
829 can only occur once in the target stack. */
830
831 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
832 {
833 if ((*cur) == t)
834 break;
835 }
836
837 /* If we don't find target_ops, quit. Only open targets should be
838 closed. */
839 if ((*cur) == NULL)
840 return 0;
841
842 /* Unchain the target. */
843 tmp = (*cur);
844 (*cur) = (*cur)->beneath;
845 tmp->beneath = NULL;
846
847 update_current_target ();
848
849 /* Finally close the target. Note we do this after unchaining, so
850 any target method calls from within the target_close
851 implementation don't end up in T anymore. */
852 target_close (t);
853
854 return 1;
855 }
856
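/* A minimal sketch of the usual pairing of push_target and
   unpush_target in a backend; the "example_*" names are hypothetical.  */

#if 0	/* Illustrative sketch; not compiled.  */

static struct target_ops example_target;

static void
example_open (char *args, int from_tty)
{
  /* ... establish the connection described by ARGS ...  */

  /* Make this target the active one for its stratum; push_target
     closes and unhooks any target already at that stratum.  */
  push_target (&example_target);
}

static void
example_teardown (void)
{
  /* unpush_target unchains the vector first and only then calls
     target_close on it, so to_close runs with the target already off
     the stack.  */
  if (target_is_pushed (&example_target))
    unpush_target (&example_target);
}

#endif
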
857 void
858 pop_all_targets_above (enum strata above_stratum)
859 {
860 while ((int) (current_target.to_stratum) > (int) above_stratum)
861 {
862 if (!unpush_target (target_stack))
863 {
864 fprintf_unfiltered (gdb_stderr,
865 "pop_all_targets couldn't find target %s\n",
866 target_stack->to_shortname);
867 internal_error (__FILE__, __LINE__,
868 _("failed internal consistency check"));
869 break;
870 }
871 }
872 }
873
874 void
875 pop_all_targets (void)
876 {
877 pop_all_targets_above (dummy_stratum);
878 }
879
880 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
881
882 int
883 target_is_pushed (struct target_ops *t)
884 {
885 struct target_ops **cur;
886
887 /* Check magic number. If wrong, it probably means someone changed
888 the struct definition, but not all the places that initialize one. */
889 if (t->to_magic != OPS_MAGIC)
890 {
891 fprintf_unfiltered (gdb_stderr,
892 "Magic number of %s target struct wrong\n",
893 t->to_shortname);
894 internal_error (__FILE__, __LINE__,
895 _("failed internal consistency check"));
896 }
897
898 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
899 if (*cur == t)
900 return 1;
901
902 return 0;
903 }
904
905 /* Using the objfile specified in OBJFILE, find the address for the
906 current thread's thread-local storage with offset OFFSET. */
907 CORE_ADDR
908 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
909 {
910 volatile CORE_ADDR addr = 0;
911 struct target_ops *target;
912
913 for (target = current_target.beneath;
914 target != NULL;
915 target = target->beneath)
916 {
917 if (target->to_get_thread_local_address != NULL)
918 break;
919 }
920
921 if (target != NULL
922 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
923 {
924 ptid_t ptid = inferior_ptid;
925 volatile struct gdb_exception ex;
926
927 TRY_CATCH (ex, RETURN_MASK_ALL)
928 {
929 CORE_ADDR lm_addr;
930
931 /* Fetch the load module address for this objfile. */
932 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
933 objfile);
934 /* If it's 0, throw the appropriate exception. */
935 if (lm_addr == 0)
936 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
937 _("TLS load module not found"));
938
939 addr = target->to_get_thread_local_address (target, ptid,
940 lm_addr, offset);
941 }
942 /* If an error occurred, print TLS related messages here. Otherwise,
943 throw the error to some higher catcher. */
944 if (ex.reason < 0)
945 {
946 int objfile_is_library = (objfile->flags & OBJF_SHARED);
947
948 switch (ex.error)
949 {
950 case TLS_NO_LIBRARY_SUPPORT_ERROR:
951 error (_("Cannot find thread-local variables "
952 "in this thread library."));
953 break;
954 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
955 if (objfile_is_library)
956 error (_("Cannot find shared library `%s' in dynamic"
957 " linker's load module list"), objfile_name (objfile));
958 else
959 error (_("Cannot find executable file `%s' in dynamic"
960 " linker's load module list"), objfile_name (objfile));
961 break;
962 case TLS_NOT_ALLOCATED_YET_ERROR:
963 if (objfile_is_library)
964 error (_("The inferior has not yet allocated storage for"
965 " thread-local variables in\n"
966 "the shared library `%s'\n"
967 "for %s"),
968 objfile_name (objfile), target_pid_to_str (ptid));
969 else
970 error (_("The inferior has not yet allocated storage for"
971 " thread-local variables in\n"
972 "the executable `%s'\n"
973 "for %s"),
974 objfile_name (objfile), target_pid_to_str (ptid));
975 break;
976 case TLS_GENERIC_ERROR:
977 if (objfile_is_library)
978 error (_("Cannot find thread-local storage for %s, "
979 "shared library %s:\n%s"),
980 target_pid_to_str (ptid),
981 objfile_name (objfile), ex.message);
982 else
983 error (_("Cannot find thread-local storage for %s, "
984 "executable file %s:\n%s"),
985 target_pid_to_str (ptid),
986 objfile_name (objfile), ex.message);
987 break;
988 default:
989 throw_exception (ex);
990 break;
991 }
992 }
993 }
994 /* It wouldn't be wrong here to try a gdbarch method, too; finding
995 TLS is an ABI-specific thing. But we don't do that yet. */
996 else
997 error (_("Cannot find thread-local variables on this target"));
998
999 return addr;
1000 }
1001
1002 const char *
1003 target_xfer_status_to_string (enum target_xfer_status err)
1004 {
1005 #define CASE(X) case X: return #X
1006 switch (err)
1007 {
1008 CASE(TARGET_XFER_E_IO);
1009 CASE(TARGET_XFER_E_UNAVAILABLE);
1010 default:
1011 return "<unknown>";
1012 }
1013 #undef CASE
1014 }
1015
1016
1017 #undef MIN
1018 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1019
1020 /* target_read_string -- read a null-terminated string, up to LEN bytes,
1021 from MEMADDR in the target. Set *ERRNOP to the errno code, or 0 if successful.
1022 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1023 is responsible for freeing it. Return the number of bytes successfully
1024 read. */
1025
1026 int
1027 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1028 {
1029 int tlen, offset, i;
1030 gdb_byte buf[4];
1031 int errcode = 0;
1032 char *buffer;
1033 int buffer_allocated;
1034 char *bufptr;
1035 unsigned int nbytes_read = 0;
1036
1037 gdb_assert (string);
1038
1039 /* Small for testing. */
1040 buffer_allocated = 4;
1041 buffer = xmalloc (buffer_allocated);
1042 bufptr = buffer;
1043
1044 while (len > 0)
1045 {
1046 tlen = MIN (len, 4 - (memaddr & 3));
1047 offset = memaddr & 3;
1048
1049 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1050 if (errcode != 0)
1051 {
1052 /* The transfer request might have crossed the boundary to an
1053 unallocated region of memory. Retry the transfer, requesting
1054 a single byte. */
1055 tlen = 1;
1056 offset = 0;
1057 errcode = target_read_memory (memaddr, buf, 1);
1058 if (errcode != 0)
1059 goto done;
1060 }
1061
1062 if (bufptr - buffer + tlen > buffer_allocated)
1063 {
1064 unsigned int bytes;
1065
1066 bytes = bufptr - buffer;
1067 buffer_allocated *= 2;
1068 buffer = xrealloc (buffer, buffer_allocated);
1069 bufptr = buffer + bytes;
1070 }
1071
1072 for (i = 0; i < tlen; i++)
1073 {
1074 *bufptr++ = buf[i + offset];
1075 if (buf[i + offset] == '\000')
1076 {
1077 nbytes_read += i + 1;
1078 goto done;
1079 }
1080 }
1081
1082 memaddr += tlen;
1083 len -= tlen;
1084 nbytes_read += tlen;
1085 }
1086 done:
1087 *string = buffer;
1088 if (errnop != NULL)
1089 *errnop = errcode;
1090 return nbytes_read;
1091 }
1092
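/* A sketch of a typical target_read_string caller; the function name
   and the 256-byte limit are hypothetical.  */

#if 0	/* Illustrative sketch; not compiled.  */

static void
example_print_inferior_string (CORE_ADDR addr)
{
  char *str = NULL;
  int errcode = 0;
  int nbytes;

  /* Read at most 256 bytes.  The buffer is malloc'd by
     target_read_string and must be freed by the caller, even when an
     error is reported through ERRCODE.  */
  nbytes = target_read_string (addr, &str, 256, &errcode);
  if (errcode == 0 && nbytes > 0)
    printf_unfiltered ("%s\n", str);
  xfree (str);
}

#endif
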
1093 struct target_section_table *
1094 target_get_section_table (struct target_ops *target)
1095 {
1096 struct target_ops *t;
1097
1098 if (targetdebug)
1099 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1100
1101 for (t = target; t != NULL; t = t->beneath)
1102 if (t->to_get_section_table != NULL)
1103 return (*t->to_get_section_table) (t);
1104
1105 return NULL;
1106 }
1107
1108 /* Find a section containing ADDR. */
1109
1110 struct target_section *
1111 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1112 {
1113 struct target_section_table *table = target_get_section_table (target);
1114 struct target_section *secp;
1115
1116 if (table == NULL)
1117 return NULL;
1118
1119 for (secp = table->sections; secp < table->sections_end; secp++)
1120 {
1121 if (addr >= secp->addr && addr < secp->endaddr)
1122 return secp;
1123 }
1124 return NULL;
1125 }
1126
1127 /* Read memory from the live target, even if currently inspecting a
1128 traceframe. The return is the same as that of target_read. */
1129
1130 static enum target_xfer_status
1131 target_read_live_memory (enum target_object object,
1132 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1133 ULONGEST *xfered_len)
1134 {
1135 enum target_xfer_status ret;
1136 struct cleanup *cleanup;
1137
1138 /* Switch momentarily out of tfind mode so as to access live memory.
1139 Note that this must not clear global state, such as the frame
1140 cache, which must still remain valid for the previous traceframe.
1141 We may be _building_ the frame cache at this point. */
1142 cleanup = make_cleanup_restore_traceframe_number ();
1143 set_traceframe_number (-1);
1144
1145 ret = target_xfer_partial (current_target.beneath, object, NULL,
1146 myaddr, NULL, memaddr, len, xfered_len);
1147
1148 do_cleanups (cleanup);
1149 return ret;
1150 }
1151
1152 /* Using the set of read-only target sections of OPS, read live
1153 read-only memory. Note that the actual reads start from the
1154 top-most target again.
1155
1156 For interface/parameters/return description see target.h,
1157 to_xfer_partial. */
1158
1159 static enum target_xfer_status
1160 memory_xfer_live_readonly_partial (struct target_ops *ops,
1161 enum target_object object,
1162 gdb_byte *readbuf, ULONGEST memaddr,
1163 ULONGEST len, ULONGEST *xfered_len)
1164 {
1165 struct target_section *secp;
1166 struct target_section_table *table;
1167
1168 secp = target_section_by_addr (ops, memaddr);
1169 if (secp != NULL
1170 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1171 secp->the_bfd_section)
1172 & SEC_READONLY))
1173 {
1174 struct target_section *p;
1175 ULONGEST memend = memaddr + len;
1176
1177 table = target_get_section_table (ops);
1178
1179 for (p = table->sections; p < table->sections_end; p++)
1180 {
1181 if (memaddr >= p->addr)
1182 {
1183 if (memend <= p->endaddr)
1184 {
1185 /* Entire transfer is within this section. */
1186 return target_read_live_memory (object, memaddr,
1187 readbuf, len, xfered_len);
1188 }
1189 else if (memaddr >= p->endaddr)
1190 {
1191 /* This section ends before the transfer starts. */
1192 continue;
1193 }
1194 else
1195 {
1196 /* This section overlaps the transfer. Just do half. */
1197 len = p->endaddr - memaddr;
1198 return target_read_live_memory (object, memaddr,
1199 readbuf, len, xfered_len);
1200 }
1201 }
1202 }
1203 }
1204
1205 return TARGET_XFER_EOF;
1206 }
1207
1208 /* Read memory from more than one valid target. A core file, for
1209 instance, could have some of the memory but delegate other bits to
1210 the target below it. So, we must manually try all targets. */
1211
1212 static enum target_xfer_status
1213 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1214 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1215 ULONGEST *xfered_len)
1216 {
1217 enum target_xfer_status res;
1218
1219 do
1220 {
1221 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1222 readbuf, writebuf, memaddr, len,
1223 xfered_len);
1224 if (res == TARGET_XFER_OK)
1225 break;
1226
1227 /* Stop if the target reports that the memory is not available. */
1228 if (res == TARGET_XFER_E_UNAVAILABLE)
1229 break;
1230
1231 /* We want to continue past core files to executables, but not
1232 past a running target's memory. */
1233 if (ops->to_has_all_memory (ops))
1234 break;
1235
1236 ops = ops->beneath;
1237 }
1238 while (ops != NULL);
1239
1240 return res;
1241 }
1242
1243 /* Perform a partial memory transfer.
1244 For docs see target.h, to_xfer_partial. */
1245
1246 static enum target_xfer_status
1247 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1248 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1249 ULONGEST len, ULONGEST *xfered_len)
1250 {
1251 enum target_xfer_status res;
1252 int reg_len;
1253 struct mem_region *region;
1254 struct inferior *inf;
1255
1256 /* For accesses to unmapped overlay sections, read directly from
1257 files. Must do this first, as MEMADDR may need adjustment. */
1258 if (readbuf != NULL && overlay_debugging)
1259 {
1260 struct obj_section *section = find_pc_overlay (memaddr);
1261
1262 if (pc_in_unmapped_range (memaddr, section))
1263 {
1264 struct target_section_table *table
1265 = target_get_section_table (ops);
1266 const char *section_name = section->the_bfd_section->name;
1267
1268 memaddr = overlay_mapped_address (memaddr, section);
1269 return section_table_xfer_memory_partial (readbuf, writebuf,
1270 memaddr, len, xfered_len,
1271 table->sections,
1272 table->sections_end,
1273 section_name);
1274 }
1275 }
1276
1277 /* Try the executable files, if "trust-readonly-sections" is set. */
1278 if (readbuf != NULL && trust_readonly)
1279 {
1280 struct target_section *secp;
1281 struct target_section_table *table;
1282
1283 secp = target_section_by_addr (ops, memaddr);
1284 if (secp != NULL
1285 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1286 secp->the_bfd_section)
1287 & SEC_READONLY))
1288 {
1289 table = target_get_section_table (ops);
1290 return section_table_xfer_memory_partial (readbuf, writebuf,
1291 memaddr, len, xfered_len,
1292 table->sections,
1293 table->sections_end,
1294 NULL);
1295 }
1296 }
1297
1298 /* If reading unavailable memory in the context of traceframes, and
1299 this address falls within a read-only section, fall back to
1300 reading from live memory. */
1301 if (readbuf != NULL && get_traceframe_number () != -1)
1302 {
1303 VEC(mem_range_s) *available;
1304
1305 /* If we fail to get the set of available memory, then the
1306 target does not support querying traceframe info, and so we
1307 attempt reading from the traceframe anyway (assuming the
1308 target implements the old QTro packet then). */
1309 if (traceframe_available_memory (&available, memaddr, len))
1310 {
1311 struct cleanup *old_chain;
1312
1313 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1314
1315 if (VEC_empty (mem_range_s, available)
1316 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1317 {
1318 /* Don't read into the traceframe's available
1319 memory. */
1320 if (!VEC_empty (mem_range_s, available))
1321 {
1322 LONGEST oldlen = len;
1323
1324 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1325 gdb_assert (len <= oldlen);
1326 }
1327
1328 do_cleanups (old_chain);
1329
1330 /* This goes through the topmost target again. */
1331 res = memory_xfer_live_readonly_partial (ops, object,
1332 readbuf, memaddr,
1333 len, xfered_len);
1334 if (res == TARGET_XFER_OK)
1335 return TARGET_XFER_OK;
1336 else
1337 {
1338 /* No use trying further, we know some memory starting
1339 at MEMADDR isn't available. */
1340 *xfered_len = len;
1341 return TARGET_XFER_E_UNAVAILABLE;
1342 }
1343 }
1344
1345 /* Don't try to read more than what is available, in
1346 case the target implements the deprecated QTro packet to
1347 cater for older GDBs (the target's knowledge of read-only
1348 sections may be outdated by now). */
1349 len = VEC_index (mem_range_s, available, 0)->length;
1350
1351 do_cleanups (old_chain);
1352 }
1353 }
1354
1355 /* Try GDB's internal data cache. */
1356 region = lookup_mem_region (memaddr);
1357 /* region->hi == 0 means there's no upper bound. */
1358 if (memaddr + len < region->hi || region->hi == 0)
1359 reg_len = len;
1360 else
1361 reg_len = region->hi - memaddr;
1362
1363 switch (region->attrib.mode)
1364 {
1365 case MEM_RO:
1366 if (writebuf != NULL)
1367 return TARGET_XFER_E_IO;
1368 break;
1369
1370 case MEM_WO:
1371 if (readbuf != NULL)
1372 return TARGET_XFER_E_IO;
1373 break;
1374
1375 case MEM_FLASH:
1376 /* We only support writing to flash during "load" for now. */
1377 if (writebuf != NULL)
1378 error (_("Writing to flash memory forbidden in this context"));
1379 break;
1380
1381 case MEM_NONE:
1382 return TARGET_XFER_E_IO;
1383 }
1384
1385 if (!ptid_equal (inferior_ptid, null_ptid))
1386 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1387 else
1388 inf = NULL;
1389
1390 if (inf != NULL
1391 /* The dcache reads whole cache lines; that doesn't play well
1392 with reading from a trace buffer, because reading outside of
1393 the collected memory range fails. */
1394 && get_traceframe_number () == -1
1395 && (region->attrib.cache
1396 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1397 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1398 {
1399 DCACHE *dcache = target_dcache_get_or_init ();
1400 int l;
1401
1402 if (readbuf != NULL)
1403 l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
1404 else
1405 /* FIXME drow/2006-08-09: If we're going to preserve const
1406 correctness dcache_xfer_memory should take readbuf and
1407 writebuf. */
1408 l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
1409 reg_len, 1);
1410 if (l <= 0)
1411 return TARGET_XFER_E_IO;
1412 else
1413 {
1414 *xfered_len = (ULONGEST) l;
1415 return TARGET_XFER_OK;
1416 }
1417 }
1418
1419 /* If none of those methods found the memory we wanted, fall back
1420 to a target partial transfer. Normally a single call to
1421 to_xfer_partial is enough; if it doesn't recognize an object
1422 it will call the to_xfer_partial of the next target down.
1423 But for memory this won't do. Memory is the only target
1424 object which can be read from more than one valid target.
1425 A core file, for instance, could have some of the memory but
1426 delegate other bits to the target below it. So, we must
1427 manually try all targets. */
1428
1429 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1430 xfered_len);
1431
1432 /* Make sure the cache gets updated no matter what - if we are writing
1433 to the stack. Even if this write is not tagged as such, we still need
1434 to update the cache. */
1435
1436 if (res == TARGET_XFER_OK
1437 && inf != NULL
1438 && writebuf != NULL
1439 && target_dcache_init_p ()
1440 && !region->attrib.cache
1441 && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
1442 || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
1443 {
1444 DCACHE *dcache = target_dcache_get ();
1445
1446 dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
1447 }
1448
1449 /* If we still haven't got anything, return the last error. We
1450 give up. */
1451 return res;
1452 }
1453
1454 /* Perform a partial memory transfer. For docs see target.h,
1455 to_xfer_partial. */
1456
1457 static enum target_xfer_status
1458 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1459 gdb_byte *readbuf, const gdb_byte *writebuf,
1460 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1461 {
1462 enum target_xfer_status res;
1463
1464 /* Zero length requests are ok and require no work. */
1465 if (len == 0)
1466 return TARGET_XFER_EOF;
1467
1468 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1469 breakpoint insns, thus hiding out from higher layers whether
1470 there are software breakpoints inserted in the code stream. */
1471 if (readbuf != NULL)
1472 {
1473 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1474 xfered_len);
1475
1476 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1477 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1478 }
1479 else
1480 {
1481 void *buf;
1482 struct cleanup *old_chain;
1483
1484 /* A large write request is likely to be partially satisfied
1485 by memory_xfer_partial_1. We will continually malloc
1486 and free a copy of the entire write request for breakpoint
1487 shadow handling even though we only end up writing a small
1488 subset of it. Cap writes to 4KB to mitigate this. */
1489 len = min (4096, len);
1490
1491 buf = xmalloc (len);
1492 old_chain = make_cleanup (xfree, buf);
1493 memcpy (buf, writebuf, len);
1494
1495 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1496 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1497 xfered_len);
1498
1499 do_cleanups (old_chain);
1500 }
1501
1502 return res;
1503 }
1504
1505 static void
1506 restore_show_memory_breakpoints (void *arg)
1507 {
1508 show_memory_breakpoints = (uintptr_t) arg;
1509 }
1510
1511 struct cleanup *
1512 make_show_memory_breakpoints_cleanup (int show)
1513 {
1514 int current = show_memory_breakpoints;
1515
1516 show_memory_breakpoints = show;
1517 return make_cleanup (restore_show_memory_breakpoints,
1518 (void *) (uintptr_t) current);
1519 }
1520
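/* A sketch of how the cleanup above might be used; the function name
   is hypothetical.  */

#if 0	/* Illustrative sketch; not compiled.  */

static void
example_read_unshadowed_code (CORE_ADDR addr, gdb_byte *buf, ssize_t len)
{
  /* Temporarily request the true memory contents, i.e. leave any
     breakpoint instructions GDB has inserted visible instead of
     substituting the shadowed original bytes.  The cleanup restores
     the previous setting even if the read throws an error.  */
  struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);

  target_read_memory (addr, buf, len);
  do_cleanups (old_chain);
}

#endif
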
1521 /* For docs see target.h, to_xfer_partial. */
1522
1523 enum target_xfer_status
1524 target_xfer_partial (struct target_ops *ops,
1525 enum target_object object, const char *annex,
1526 gdb_byte *readbuf, const gdb_byte *writebuf,
1527 ULONGEST offset, ULONGEST len,
1528 ULONGEST *xfered_len)
1529 {
1530 enum target_xfer_status retval;
1531
1532 gdb_assert (ops->to_xfer_partial != NULL);
1533
1534 /* Transfer is done when LEN is zero. */
1535 if (len == 0)
1536 return TARGET_XFER_EOF;
1537
1538 if (writebuf && !may_write_memory)
1539 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1540 core_addr_to_string_nz (offset), plongest (len));
1541
1542 *xfered_len = 0;
1543
1544 /* If this is a memory transfer, let the memory-specific code
1545 have a look at it instead. Memory transfers are more
1546 complicated. */
1547 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1548 || object == TARGET_OBJECT_CODE_MEMORY)
1549 retval = memory_xfer_partial (ops, object, readbuf,
1550 writebuf, offset, len, xfered_len);
1551 else if (object == TARGET_OBJECT_RAW_MEMORY)
1552 {
1553 /* Request the normal memory object from other layers. */
1554 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1555 xfered_len);
1556 }
1557 else
1558 retval = ops->to_xfer_partial (ops, object, annex, readbuf,
1559 writebuf, offset, len, xfered_len);
1560
1561 if (targetdebug)
1562 {
1563 const unsigned char *myaddr = NULL;
1564
1565 fprintf_unfiltered (gdb_stdlog,
1566 "%s:target_xfer_partial "
1567 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1568 ops->to_shortname,
1569 (int) object,
1570 (annex ? annex : "(null)"),
1571 host_address_to_string (readbuf),
1572 host_address_to_string (writebuf),
1573 core_addr_to_string_nz (offset),
1574 pulongest (len), retval,
1575 pulongest (*xfered_len));
1576
1577 if (readbuf)
1578 myaddr = readbuf;
1579 if (writebuf)
1580 myaddr = writebuf;
1581 if (retval == TARGET_XFER_OK && myaddr != NULL)
1582 {
1583 int i;
1584
1585 fputs_unfiltered (", bytes =", gdb_stdlog);
1586 for (i = 0; i < *xfered_len; i++)
1587 {
1588 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1589 {
1590 if (targetdebug < 2 && i > 0)
1591 {
1592 fprintf_unfiltered (gdb_stdlog, " ...");
1593 break;
1594 }
1595 fprintf_unfiltered (gdb_stdlog, "\n");
1596 }
1597
1598 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1599 }
1600 }
1601
1602 fputc_unfiltered ('\n', gdb_stdlog);
1603 }
1604
1605 /* Check implementations of to_xfer_partial update *XFERED_LEN
1606 properly. Do assertion after printing debug messages, so that we
1607 can find more clues on assertion failure from debugging messages. */
1608 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
1609 gdb_assert (*xfered_len > 0);
1610
1611 return retval;
1612 }
1613
1614 /* Read LEN bytes of target memory at address MEMADDR, placing the
1615 results in GDB's memory at MYADDR. Returns either 0 for success or
1616 TARGET_XFER_E_IO if any error occurs.
1617
1618 If an error occurs, no guarantee is made about the contents of the data at
1619 MYADDR. In particular, the caller should not depend upon partial reads
1620 filling the buffer with good data. There is no way for the caller to know
1621 how much good data might have been transferred anyway. Callers that can
1622 deal with partial reads should call target_read (which will retry until
1623 it makes no progress, and then return how much was transferred). */
1624
1625 int
1626 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1627 {
1628 /* Dispatch to the topmost target, not the flattened current_target.
1629 Memory accesses check target->to_has_(all_)memory, and the
1630 flattened target doesn't inherit those. */
1631 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1632 myaddr, memaddr, len) == len)
1633 return 0;
1634 else
1635 return TARGET_XFER_E_IO;
1636 }
1637
1638 /* Like target_read_memory, but specify explicitly that this is a read
1639 from the target's raw memory. That is, this read bypasses the
1640 dcache, breakpoint shadowing, etc. */
1641
1642 int
1643 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1644 {
1645 /* See comment in target_read_memory about why the request starts at
1646 current_target.beneath. */
1647 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1648 myaddr, memaddr, len) == len)
1649 return 0;
1650 else
1651 return TARGET_XFER_E_IO;
1652 }
1653
1654 /* Like target_read_memory, but specify explicitly that this is a read from
1655 the target's stack. This may trigger different cache behavior. */
1656
1657 int
1658 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1659 {
1660 /* See comment in target_read_memory about why the request starts at
1661 current_target.beneath. */
1662 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1663 myaddr, memaddr, len) == len)
1664 return 0;
1665 else
1666 return TARGET_XFER_E_IO;
1667 }
1668
1669 /* Like target_read_memory, but specify explicitly that this is a read from
1670 the target's code. This may trigger different cache behavior. */
1671
1672 int
1673 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1674 {
1675 /* See comment in target_read_memory about why the request starts at
1676 current_target.beneath. */
1677 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1678 myaddr, memaddr, len) == len)
1679 return 0;
1680 else
1681 return TARGET_XFER_E_IO;
1682 }
1683
1684 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1685 Returns either 0 for success or TARGET_XFER_E_IO if any
1686 error occurs. If an error occurs, no guarantee is made about how
1687 much data got written. Callers that can deal with partial writes
1688 should call target_write. */
1689
1690 int
1691 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1692 {
1693 /* See comment in target_read_memory about why the request starts at
1694 current_target.beneath. */
1695 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1696 myaddr, memaddr, len) == len)
1697 return 0;
1698 else
1699 return TARGET_XFER_E_IO;
1700 }
1701
1702 /* Write LEN bytes from MYADDR to target raw memory at address
1703 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1704 if any error occurs. If an error occurs, no guarantee is made
1705 about how much data got written. Callers that can deal with
1706 partial writes should call target_write. */
1707
1708 int
1709 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1710 {
1711 /* See comment in target_read_memory about why the request starts at
1712 current_target.beneath. */
1713 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1714 myaddr, memaddr, len) == len)
1715 return 0;
1716 else
1717 return TARGET_XFER_E_IO;
1718 }
1719
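/* A sketch showing the error convention shared by the
   target_read_memory / target_write_memory family; the helper name is
   hypothetical.  */

#if 0	/* Illustrative sketch; not compiled.  */

static int
example_poke_byte (CORE_ADDR addr, gdb_byte value)
{
  gdb_byte old;

  /* Both calls return 0 on success and TARGET_XFER_E_IO on failure;
     on failure nothing may be assumed about partial transfers.  */
  if (target_read_memory (addr, &old, 1) != 0)
    return -1;
  if (old != value && target_write_memory (addr, &value, 1) != 0)
    return -1;
  return 0;
}

#endif
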
1720 /* Fetch the target's memory map. */
1721
1722 VEC(mem_region_s) *
1723 target_memory_map (void)
1724 {
1725 VEC(mem_region_s) *result;
1726 struct mem_region *last_one, *this_one;
1727 int ix;
1728 struct target_ops *t;
1729
1730 if (targetdebug)
1731 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1732
1733 for (t = current_target.beneath; t != NULL; t = t->beneath)
1734 if (t->to_memory_map != NULL)
1735 break;
1736
1737 if (t == NULL)
1738 return NULL;
1739
1740 result = t->to_memory_map (t);
1741 if (result == NULL)
1742 return NULL;
1743
1744 qsort (VEC_address (mem_region_s, result),
1745 VEC_length (mem_region_s, result),
1746 sizeof (struct mem_region), mem_region_cmp);
1747
1748 /* Check that regions do not overlap. Simultaneously assign
1749 a numbering for the "mem" commands to use to refer to
1750 each region. */
1751 last_one = NULL;
1752 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1753 {
1754 this_one->number = ix;
1755
1756 if (last_one && last_one->hi > this_one->lo)
1757 {
1758 warning (_("Overlapping regions in memory map: ignoring"));
1759 VEC_free (mem_region_s, result);
1760 return NULL;
1761 }
1762 last_one = this_one;
1763 }
1764
1765 return result;
1766 }
1767
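/* A sketch of a target_memory_map consumer, assuming (as GDB's
   existing callers do) that the caller releases the returned vector;
   the helper name is hypothetical.  */

#if 0	/* Illustrative sketch; not compiled.  */

static void
example_count_flash_regions (void)
{
  VEC(mem_region_s) *map = target_memory_map ();
  struct mem_region *r;
  int ix, nflash = 0;

  if (map == NULL)
    return;

  for (ix = 0; VEC_iterate (mem_region_s, map, ix, r); ix++)
    if (r->attrib.mode == MEM_FLASH)
      nflash++;

  printf_unfiltered ("%d flash region(s)\n", nflash);
  VEC_free (mem_region_s, map);
}

#endif
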
1768 void
1769 target_flash_erase (ULONGEST address, LONGEST length)
1770 {
1771 struct target_ops *t;
1772
1773 for (t = current_target.beneath; t != NULL; t = t->beneath)
1774 if (t->to_flash_erase != NULL)
1775 {
1776 if (targetdebug)
1777 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1778 hex_string (address), phex (length, 0));
1779 t->to_flash_erase (t, address, length);
1780 return;
1781 }
1782
1783 tcomplain ();
1784 }
1785
1786 void
1787 target_flash_done (void)
1788 {
1789 struct target_ops *t;
1790
1791 for (t = current_target.beneath; t != NULL; t = t->beneath)
1792 if (t->to_flash_done != NULL)
1793 {
1794 if (targetdebug)
1795 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1796 t->to_flash_done (t);
1797 return;
1798 }
1799
1800 tcomplain ();
1801 }
1802
1803 static void
1804 show_trust_readonly (struct ui_file *file, int from_tty,
1805 struct cmd_list_element *c, const char *value)
1806 {
1807 fprintf_filtered (file,
1808 _("Mode for reading from readonly sections is %s.\n"),
1809 value);
1810 }
1811
1812 /* More generic transfers. */
1813
1814 static enum target_xfer_status
1815 default_xfer_partial (struct target_ops *ops, enum target_object object,
1816 const char *annex, gdb_byte *readbuf,
1817 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1818 ULONGEST *xfered_len)
1819 {
1820 if (object == TARGET_OBJECT_MEMORY
1821 && ops->deprecated_xfer_memory != NULL)
1822 /* If available, fall back to the target's
1823 "deprecated_xfer_memory" method. */
1824 {
1825 int xfered = -1;
1826
1827 errno = 0;
1828 if (writebuf != NULL)
1829 {
1830 void *buffer = xmalloc (len);
1831 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1832
1833 memcpy (buffer, writebuf, len);
1834 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1835 1/*write*/, NULL, ops);
1836 do_cleanups (cleanup);
1837 }
1838 if (readbuf != NULL)
1839 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1840 0/*read*/, NULL, ops);
1841 if (xfered > 0)
1842 {
1843 *xfered_len = (ULONGEST) xfered;
1844 return TARGET_XFER_OK;
1845 }
1846 else if (xfered == 0 && errno == 0)
1847 /* "deprecated_xfer_memory" uses 0, cross checked against
1848 ERRNO as one indication of an error. */
1849 return TARGET_XFER_EOF;
1850 else
1851 return TARGET_XFER_E_IO;
1852 }
1853 else
1854 {
1855 gdb_assert (ops->beneath != NULL);
1856 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1857 readbuf, writebuf, offset, len,
1858 xfered_len);
1859 }
1860 }
1861
1862 /* Target vector read/write partial wrapper functions. */
1863
1864 static enum target_xfer_status
1865 target_read_partial (struct target_ops *ops,
1866 enum target_object object,
1867 const char *annex, gdb_byte *buf,
1868 ULONGEST offset, ULONGEST len,
1869 ULONGEST *xfered_len)
1870 {
1871 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1872 xfered_len);
1873 }
1874
1875 static enum target_xfer_status
1876 target_write_partial (struct target_ops *ops,
1877 enum target_object object,
1878 const char *annex, const gdb_byte *buf,
1879 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1880 {
1881 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1882 xfered_len);
1883 }
1884
1885 /* Wrappers to perform the full transfer. */
1886
1887 /* For docs on target_read see target.h. */
1888
1889 LONGEST
1890 target_read (struct target_ops *ops,
1891 enum target_object object,
1892 const char *annex, gdb_byte *buf,
1893 ULONGEST offset, LONGEST len)
1894 {
1895 LONGEST xfered = 0;
1896
1897 while (xfered < len)
1898 {
1899 ULONGEST xfered_len;
1900 enum target_xfer_status status;
1901
1902 status = target_read_partial (ops, object, annex,
1903 (gdb_byte *) buf + xfered,
1904 offset + xfered, len - xfered,
1905 &xfered_len);
1906
1907 /* Call an observer, notifying them of the xfer progress? */
1908 if (status == TARGET_XFER_EOF)
1909 return xfered;
1910 else if (status == TARGET_XFER_OK)
1911 {
1912 xfered += xfered_len;
1913 QUIT;
1914 }
1915 else
1916 return -1;
1917
1918 }
1919 return len;
1920 }
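
/* Illustrative sketch (added; not in the original file): because
   target_read loops over target_read_partial until the request is
   satisfied, a caller only needs to compare the return value against
   the requested length.  MEMADDR and the buffer size are placeholders.

     gdb_byte buf[64];

     if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
                      buf, memaddr, sizeof buf) != sizeof buf)
       memory_error (TARGET_XFER_E_IO, memaddr);
*/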
1921
1922 /* Assuming that the entire [begin, end) range of memory cannot be
1923 read, try to read whatever subrange is possible to read.
1924
1925 The function returns, in RESULT, either zero or one memory block.
1926 If there's a readable subrange at the beginning, it is completely
1927 read and returned. Any further readable subrange will not be read.
1928 Otherwise, if there's a readable subrange at the end, it will be
1929 completely read and returned. Any readable subranges before it
1930 (obviously, not starting at the beginning) will be ignored. In
1931 other cases -- either no readable subrange at all, or readable
1932 subranges that are neither at the beginning nor the end -- nothing is returned.
1933
1934 The purpose of this function is to handle a read across a boundary
1935 of accessible memory in a case when memory map is not available.
1936 The above restrictions are fine for this case, but will give
1937 incorrect results if the memory is 'patchy'. However, supporting
1938 'patchy' memory would require trying to read every single byte,
1939 and that seems an unacceptable solution. An explicit memory map is
1940 recommended for this case -- and read_memory_robust will
1941 take care of reading multiple ranges then. */
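
/* Worked example, added for illustration (not part of the original
   comment): suppose [0x1000, 0x2000) fails to read as a whole but
   [0x1000, 0x1800) is readable.  The first byte at 0x1000 reads fine,
   so the search runs "forward": each iteration reads the first half of
   the remaining suspect range, halves that read cleanly move the
   suspect range toward its end, and the bisection converges on the
   first unreadable byte at 0x1800.  The single block [0x1000, 0x1800)
   is then pushed onto *RESULT.  */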
1942
1943 static void
1944 read_whatever_is_readable (struct target_ops *ops,
1945 ULONGEST begin, ULONGEST end,
1946 VEC(memory_read_result_s) **result)
1947 {
1948 gdb_byte *buf = xmalloc (end - begin);
1949 ULONGEST current_begin = begin;
1950 ULONGEST current_end = end;
1951 int forward;
1952 memory_read_result_s r;
1953 ULONGEST xfered_len;
1954
1955 /* If we previously failed to read 1 byte, nothing can be done here. */
1956 if (end - begin <= 1)
1957 {
1958 xfree (buf);
1959 return;
1960 }
1961
1962 /* Check that either the first or the last byte is readable, and give up
1963 if not. This heuristic is meant to permit reading accessible memory
1964 at the boundary of an accessible region. */
1965 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1966 buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
1967 {
1968 forward = 1;
1969 ++current_begin;
1970 }
1971 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1972 buf + (end-begin) - 1, end - 1, 1,
1973 &xfered_len) == TARGET_XFER_OK)
1974 {
1975 forward = 0;
1976 --current_end;
1977 }
1978 else
1979 {
1980 xfree (buf);
1981 return;
1982 }
1983
1984 /* The loop invariant is that the range [current_begin, current_end) was
1985 previously found to be not readable as a whole.
1986
1987 Note loop condition -- if the range has 1 byte, we can't divide the range
1988 so there's no point trying further. */
1989 while (current_end - current_begin > 1)
1990 {
1991 ULONGEST first_half_begin, first_half_end;
1992 ULONGEST second_half_begin, second_half_end;
1993 LONGEST xfer;
1994 ULONGEST middle = current_begin + (current_end - current_begin)/2;
1995
1996 if (forward)
1997 {
1998 first_half_begin = current_begin;
1999 first_half_end = middle;
2000 second_half_begin = middle;
2001 second_half_end = current_end;
2002 }
2003 else
2004 {
2005 first_half_begin = middle;
2006 first_half_end = current_end;
2007 second_half_begin = current_begin;
2008 second_half_end = middle;
2009 }
2010
2011 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2012 buf + (first_half_begin - begin),
2013 first_half_begin,
2014 first_half_end - first_half_begin);
2015
2016 if (xfer == first_half_end - first_half_begin)
2017 {
2018 /* This half reads up fine. So, the error must be in the
2019 other half. */
2020 current_begin = second_half_begin;
2021 current_end = second_half_end;
2022 }
2023 else
2024 {
2025 /* This half is not readable. Because we've tried one byte, we
2026 know some part of this half is actually readable. Go to the next
2027 iteration to divide again and try to read.
2028
2029 We don't handle the other half, because this function only tries
2030 to read a single readable subrange. */
2031 current_begin = first_half_begin;
2032 current_end = first_half_end;
2033 }
2034 }
2035
2036 if (forward)
2037 {
2038 /* The [begin, current_begin) range has been read. */
2039 r.begin = begin;
2040 r.end = current_begin;
2041 r.data = buf;
2042 }
2043 else
2044 {
2045 /* The [current_end, end) range has been read. */
2046 LONGEST rlen = end - current_end;
2047
2048 r.data = xmalloc (rlen);
2049 memcpy (r.data, buf + current_end - begin, rlen);
2050 r.begin = current_end;
2051 r.end = end;
2052 xfree (buf);
2053 }
2054 VEC_safe_push (memory_read_result_s, *result, &r);
2055 }
2056
2057 void
2058 free_memory_read_result_vector (void *x)
2059 {
2060 VEC(memory_read_result_s) *v = x;
2061 memory_read_result_s *current;
2062 int ix;
2063
2064 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2065 {
2066 xfree (current->data);
2067 }
2068 VEC_free (memory_read_result_s, v);
2069 }
2070
2071 VEC(memory_read_result_s) *
2072 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2073 {
2074 VEC(memory_read_result_s) *result = 0;
2075
2076 LONGEST xfered = 0;
2077 while (xfered < len)
2078 {
2079 struct mem_region *region = lookup_mem_region (offset + xfered);
2080 LONGEST rlen;
2081
2082 /* If there is no explicit region, a fake one should be created. */
2083 gdb_assert (region);
2084
2085 if (region->hi == 0)
2086 rlen = len - xfered;
2087 else
2088 rlen = region->hi - offset;
2089
2090 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2091 {
2092 /* Cannot read this region. Note that we can end up here only
2093 if the region is explicitly marked inaccessible, or
2094 'inaccessible-by-default' is in effect. */
2095 xfered += rlen;
2096 }
2097 else
2098 {
2099 LONGEST to_read = min (len - xfered, rlen);
2100 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2101
2102 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2103 (gdb_byte *) buffer,
2104 offset + xfered, to_read);
2105 /* Call an observer, notifying them of the xfer progress? */
2106 if (xfer <= 0)
2107 {
2108 /* Got an error reading full chunk. See if maybe we can read
2109 some subrange. */
2110 xfree (buffer);
2111 read_whatever_is_readable (ops, offset + xfered,
2112 offset + xfered + to_read, &result);
2113 xfered += to_read;
2114 }
2115 else
2116 {
2117 struct memory_read_result r;
2118 r.data = buffer;
2119 r.begin = offset + xfered;
2120 r.end = r.begin + xfer;
2121 VEC_safe_push (memory_read_result_s, result, &r);
2122 xfered += xfer;
2123 }
2124 QUIT;
2125 }
2126 }
2127 return result;
2128 }
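
/* Illustrative sketch (added): a hypothetical caller of
   read_memory_robust, using free_memory_read_result_vector as the
   cleanup.  ADDR, LEN and process_bytes are placeholders invented for
   this example.

     VEC(memory_read_result_s) *ranges
       = read_memory_robust (current_target.beneath, addr, len);
     struct cleanup *old
       = make_cleanup (free_memory_read_result_vector, ranges);
     memory_read_result_s *m;
     int ix;

     for (ix = 0; VEC_iterate (memory_read_result_s, ranges, ix, m); ix++)
       process_bytes (m->data, m->end - m->begin);

     do_cleanups (old);
*/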
2129
2130
2131 /* An alternative to target_write with progress callbacks. */
2132
2133 LONGEST
2134 target_write_with_progress (struct target_ops *ops,
2135 enum target_object object,
2136 const char *annex, const gdb_byte *buf,
2137 ULONGEST offset, LONGEST len,
2138 void (*progress) (ULONGEST, void *), void *baton)
2139 {
2140 LONGEST xfered = 0;
2141
2142 /* Give the progress callback a chance to set up. */
2143 if (progress)
2144 (*progress) (0, baton);
2145
2146 while (xfered < len)
2147 {
2148 ULONGEST xfered_len;
2149 enum target_xfer_status status;
2150
2151 status = target_write_partial (ops, object, annex,
2152 (gdb_byte *) buf + xfered,
2153 offset + xfered, len - xfered,
2154 &xfered_len);
2155
2156 if (status == TARGET_XFER_EOF)
2157 return xfered;
2158 if (TARGET_XFER_STATUS_ERROR_P (status))
2159 return -1;
2160
2161 gdb_assert (status == TARGET_XFER_OK);
2162 if (progress)
2163 (*progress) (xfered_len, baton);
2164
2165 xfered += xfered_len;
2166 QUIT;
2167 }
2168 return len;
2169 }
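
/* Illustrative sketch (added): a hypothetical progress callback for
   target_write_with_progress.  Per the loop above, it is called once
   with 0 before the transfer starts and then once per completed chunk
   with that chunk's length; OPS, BUF, ADDR and LEN are placeholders.

     static void
     example_progress (ULONGEST chunk, void *baton)
     {
       ULONGEST *written = baton;

       *written += chunk;
       printf_unfiltered ("wrote %s bytes so far\n", pulongest (*written));
     }

     ULONGEST total = 0;
     target_write_with_progress (ops, TARGET_OBJECT_MEMORY, NULL, buf,
                                 addr, len, example_progress, &total);
*/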
2170
2171 /* For docs on target_write see target.h. */
2172
2173 LONGEST
2174 target_write (struct target_ops *ops,
2175 enum target_object object,
2176 const char *annex, const gdb_byte *buf,
2177 ULONGEST offset, LONGEST len)
2178 {
2179 return target_write_with_progress (ops, object, annex, buf, offset, len,
2180 NULL, NULL);
2181 }
2182
2183 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2184 the size of the transferred data. PADDING additional bytes are
2185 available in *BUF_P. This is a helper function for
2186 target_read_alloc; see the declaration of that function for more
2187 information. */
2188
2189 static LONGEST
2190 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2191 const char *annex, gdb_byte **buf_p, int padding)
2192 {
2193 size_t buf_alloc, buf_pos;
2194 gdb_byte *buf;
2195
2196 /* This function does not have a length parameter; it reads the
2197 entire OBJECT. Also, it doesn't support objects fetched partly
2198 from one target and partly from another (in a different stratum,
2199 e.g. a core file and an executable). Both reasons make it
2200 unsuitable for reading memory. */
2201 gdb_assert (object != TARGET_OBJECT_MEMORY);
2202
2203 /* Start by reading up to 4K at a time. The target will throttle
2204 this number down if necessary. */
2205 buf_alloc = 4096;
2206 buf = xmalloc (buf_alloc);
2207 buf_pos = 0;
2208 while (1)
2209 {
2210 ULONGEST xfered_len;
2211 enum target_xfer_status status;
2212
2213 status = target_read_partial (ops, object, annex, &buf[buf_pos],
2214 buf_pos, buf_alloc - buf_pos - padding,
2215 &xfered_len);
2216
2217 if (status == TARGET_XFER_EOF)
2218 {
2219 /* Read all there was. */
2220 if (buf_pos == 0)
2221 xfree (buf);
2222 else
2223 *buf_p = buf;
2224 return buf_pos;
2225 }
2226 else if (status != TARGET_XFER_OK)
2227 {
2228 /* An error occurred. */
2229 xfree (buf);
2230 return TARGET_XFER_E_IO;
2231 }
2232
2233 buf_pos += xfered_len;
2234
2235 /* If the buffer is filling up, expand it. */
2236 if (buf_alloc < buf_pos * 2)
2237 {
2238 buf_alloc *= 2;
2239 buf = xrealloc (buf, buf_alloc);
2240 }
2241
2242 QUIT;
2243 }
2244 }
2245
2246 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2247 the size of the transferred data. See the declaration in "target.h"
2248 for more information about the return value. */
2249
2250 LONGEST
2251 target_read_alloc (struct target_ops *ops, enum target_object object,
2252 const char *annex, gdb_byte **buf_p)
2253 {
2254 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2255 }
2256
2257 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2258 returned as a string, allocated using xmalloc. If an error occurs
2259 or the transfer is unsupported, NULL is returned. Empty objects
2260 are returned as allocated but empty strings. A warning is issued
2261 if the result contains any embedded NUL bytes. */
2262
2263 char *
2264 target_read_stralloc (struct target_ops *ops, enum target_object object,
2265 const char *annex)
2266 {
2267 gdb_byte *buffer;
2268 char *bufstr;
2269 LONGEST i, transferred;
2270
2271 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2272 bufstr = (char *) buffer;
2273
2274 if (transferred < 0)
2275 return NULL;
2276
2277 if (transferred == 0)
2278 return xstrdup ("");
2279
2280 bufstr[transferred] = 0;
2281
2282 /* Check for embedded NUL bytes; but allow trailing NULs. */
2283 for (i = strlen (bufstr); i < transferred; i++)
2284 if (bufstr[i] != 0)
2285 {
2286 warning (_("target object %d, annex %s, "
2287 "contained unexpected null characters"),
2288 (int) object, annex ? annex : "(none)");
2289 break;
2290 }
2291
2292 return bufstr;
2293 }
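
/* Illustrative sketch (added): fetching an object as a string with
   target_read_stralloc.  The object/annex pair mirrors what
   target_get_osdata below does; the "processes" annex is only an
   example.

     char *data = target_read_stralloc (current_target.beneath,
                                        TARGET_OBJECT_OSDATA, "processes");
     if (data != NULL)
       {
         ...parse DATA here...
         xfree (data);
       }
*/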
2294
2295 /* Memory transfer methods. */
2296
2297 void
2298 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2299 LONGEST len)
2300 {
2301 /* This method is used to read from an alternate, non-current
2302 target. This read must bypass the overlay support (as symbols
2303 don't match this target), and GDB's internal cache (wrong cache
2304 for this target). */
2305 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2306 != len)
2307 memory_error (TARGET_XFER_E_IO, addr);
2308 }
2309
2310 ULONGEST
2311 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2312 int len, enum bfd_endian byte_order)
2313 {
2314 gdb_byte buf[sizeof (ULONGEST)];
2315
2316 gdb_assert (len <= sizeof (buf));
2317 get_target_memory (ops, addr, buf, len);
2318 return extract_unsigned_integer (buf, len, byte_order);
2319 }
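
/* Illustrative sketch (added): reading a 4-byte big-endian value from
   an alternate target.  OPS and ADDR stand for whatever the caller
   already has; read errors are reported through memory_error by
   get_target_memory.

     ULONGEST word = get_target_memory_unsigned (ops, addr, 4,
                                                 BFD_ENDIAN_BIG);
*/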
2320
2321 /* See target.h. */
2322
2323 int
2324 target_insert_breakpoint (struct gdbarch *gdbarch,
2325 struct bp_target_info *bp_tgt)
2326 {
2327 if (!may_insert_breakpoints)
2328 {
2329 warning (_("May not insert breakpoints"));
2330 return 1;
2331 }
2332
2333 return current_target.to_insert_breakpoint (&current_target,
2334 gdbarch, bp_tgt);
2335 }
2336
2337 /* See target.h. */
2338
2339 int
2340 target_remove_breakpoint (struct gdbarch *gdbarch,
2341 struct bp_target_info *bp_tgt)
2342 {
2343 /* This is kind of a weird case to handle, but the permission might
2344 have been changed after breakpoints were inserted - in which case
2345 we should just take the user literally and assume that any
2346 breakpoints should be left in place. */
2347 if (!may_insert_breakpoints)
2348 {
2349 warning (_("May not remove breakpoints"));
2350 return 1;
2351 }
2352
2353 return current_target.to_remove_breakpoint (&current_target,
2354 gdbarch, bp_tgt);
2355 }
2356
2357 static void
2358 target_info (char *args, int from_tty)
2359 {
2360 struct target_ops *t;
2361 int has_all_mem = 0;
2362
2363 if (symfile_objfile != NULL)
2364 printf_unfiltered (_("Symbols from \"%s\".\n"),
2365 objfile_name (symfile_objfile));
2366
2367 for (t = target_stack; t != NULL; t = t->beneath)
2368 {
2369 if (!(*t->to_has_memory) (t))
2370 continue;
2371
2372 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2373 continue;
2374 if (has_all_mem)
2375 printf_unfiltered (_("\tWhile running this, "
2376 "GDB does not access memory from...\n"));
2377 printf_unfiltered ("%s:\n", t->to_longname);
2378 (t->to_files_info) (t);
2379 has_all_mem = (*t->to_has_all_memory) (t);
2380 }
2381 }
2382
2383 /* This function is called before any new inferior is created, e.g.
2384 by running a program, attaching, or connecting to a target.
2385 It cleans up any state from previous invocations which might
2386 change between runs. This is a subset of what target_preopen
2387 resets (things which might change between targets). */
2388
2389 void
2390 target_pre_inferior (int from_tty)
2391 {
2392 /* Clear out solib state. Otherwise the solib state of the previous
2393 inferior might have survived and is entirely wrong for the new
2394 target. This has been observed on GNU/Linux using glibc 2.3. How
2395 to reproduce:
2396
2397 bash$ ./foo&
2398 [1] 4711
2399 bash$ ./foo&
2400 [1] 4712
2401 bash$ gdb ./foo
2402 [...]
2403 (gdb) attach 4711
2404 (gdb) detach
2405 (gdb) attach 4712
2406 Cannot access memory at address 0xdeadbeef
2407 */
2408
2409 /* In some OSs, the shared library list is the same/global/shared
2410 across inferiors. If code is shared between processes, so are
2411 memory regions and features. */
2412 if (!gdbarch_has_global_solist (target_gdbarch ()))
2413 {
2414 no_shared_libraries (NULL, from_tty);
2415
2416 invalidate_target_mem_regions ();
2417
2418 target_clear_description ();
2419 }
2420
2421 agent_capability_invalidate ();
2422 }
2423
2424 /* Callback for iterate_over_inferiors. Gets rid of the given
2425 inferior. */
2426
2427 static int
2428 dispose_inferior (struct inferior *inf, void *args)
2429 {
2430 struct thread_info *thread;
2431
2432 thread = any_thread_of_process (inf->pid);
2433 if (thread)
2434 {
2435 switch_to_thread (thread->ptid);
2436
2437 /* Core inferiors actually should be detached, not killed. */
2438 if (target_has_execution)
2439 target_kill ();
2440 else
2441 target_detach (NULL, 0);
2442 }
2443
2444 return 0;
2445 }
2446
2447 /* This is to be called by the open routine before it does
2448 anything. */
2449
2450 void
2451 target_preopen (int from_tty)
2452 {
2453 dont_repeat ();
2454
2455 if (have_inferiors ())
2456 {
2457 if (!from_tty
2458 || !have_live_inferiors ()
2459 || query (_("A program is being debugged already. Kill it? ")))
2460 iterate_over_inferiors (dispose_inferior, NULL);
2461 else
2462 error (_("Program not killed."));
2463 }
2464
2465 /* Calling target_kill may remove the target from the stack. But if
2466 it doesn't (which seems like a win for UDI), remove it now. */
2467 /* Leave the exec target, though. The user may be switching from a
2468 live process to a core of the same program. */
2469 pop_all_targets_above (file_stratum);
2470
2471 target_pre_inferior (from_tty);
2472 }
2473
2474 /* Detach a target after doing deferred register stores. */
2475
2476 void
2477 target_detach (const char *args, int from_tty)
2478 {
2479 struct target_ops* t;
2480
2481 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2482 /* Don't remove global breakpoints here. They're removed on
2483 disconnection from the target. */
2484 ;
2485 else
2486 /* If we're in breakpoints-always-inserted mode, have to remove
2487 them before detaching. */
2488 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2489
2490 prepare_for_detach ();
2491
2492 current_target.to_detach (&current_target, args, from_tty);
2493 if (targetdebug)
2494 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2495 args, from_tty);
2496 }
2497
2498 void
2499 target_disconnect (char *args, int from_tty)
2500 {
2501 struct target_ops *t;
2502
2503 /* If we're in breakpoints-always-inserted mode or if breakpoints
2504 are global across processes, we have to remove them before
2505 disconnecting. */
2506 remove_breakpoints ();
2507
2508 for (t = current_target.beneath; t != NULL; t = t->beneath)
2509 if (t->to_disconnect != NULL)
2510 {
2511 if (targetdebug)
2512 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2513 args, from_tty);
2514 t->to_disconnect (t, args, from_tty);
2515 return;
2516 }
2517
2518 tcomplain ();
2519 }
2520
2521 ptid_t
2522 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2523 {
2524 struct target_ops *t;
2525 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2526 status, options);
2527
2528 if (targetdebug)
2529 {
2530 char *status_string;
2531 char *options_string;
2532
2533 status_string = target_waitstatus_to_string (status);
2534 options_string = target_options_to_string (options);
2535 fprintf_unfiltered (gdb_stdlog,
2536 "target_wait (%d, status, options={%s})"
2537 " = %d, %s\n",
2538 ptid_get_pid (ptid), options_string,
2539 ptid_get_pid (retval), status_string);
2540 xfree (status_string);
2541 xfree (options_string);
2542 }
2543
2544 return retval;
2545 }
2546
2547 char *
2548 target_pid_to_str (ptid_t ptid)
2549 {
2550 struct target_ops *t;
2551
2552 for (t = current_target.beneath; t != NULL; t = t->beneath)
2553 {
2554 if (t->to_pid_to_str != NULL)
2555 return (*t->to_pid_to_str) (t, ptid);
2556 }
2557
2558 return normal_pid_to_str (ptid);
2559 }
2560
2561 char *
2562 target_thread_name (struct thread_info *info)
2563 {
2564 return current_target.to_thread_name (&current_target, info);
2565 }
2566
2567 void
2568 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2569 {
2570 struct target_ops *t;
2571
2572 target_dcache_invalidate ();
2573
2574 current_target.to_resume (&current_target, ptid, step, signal);
2575 if (targetdebug)
2576 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2577 ptid_get_pid (ptid),
2578 step ? "step" : "continue",
2579 gdb_signal_to_name (signal));
2580
2581 registers_changed_ptid (ptid);
2582 set_executing (ptid, 1);
2583 set_running (ptid, 1);
2584 clear_inline_frame_state (ptid);
2585 }
2586
2587 void
2588 target_pass_signals (int numsigs, unsigned char *pass_signals)
2589 {
2590 struct target_ops *t;
2591
2592 for (t = current_target.beneath; t != NULL; t = t->beneath)
2593 {
2594 if (t->to_pass_signals != NULL)
2595 {
2596 if (targetdebug)
2597 {
2598 int i;
2599
2600 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2601 numsigs);
2602
2603 for (i = 0; i < numsigs; i++)
2604 if (pass_signals[i])
2605 fprintf_unfiltered (gdb_stdlog, " %s",
2606 gdb_signal_to_name (i));
2607
2608 fprintf_unfiltered (gdb_stdlog, " })\n");
2609 }
2610
2611 (*t->to_pass_signals) (t, numsigs, pass_signals);
2612 return;
2613 }
2614 }
2615 }
2616
2617 void
2618 target_program_signals (int numsigs, unsigned char *program_signals)
2619 {
2620 struct target_ops *t;
2621
2622 for (t = current_target.beneath; t != NULL; t = t->beneath)
2623 {
2624 if (t->to_program_signals != NULL)
2625 {
2626 if (targetdebug)
2627 {
2628 int i;
2629
2630 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2631 numsigs);
2632
2633 for (i = 0; i < numsigs; i++)
2634 if (program_signals[i])
2635 fprintf_unfiltered (gdb_stdlog, " %s",
2636 gdb_signal_to_name (i));
2637
2638 fprintf_unfiltered (gdb_stdlog, " })\n");
2639 }
2640
2641 (*t->to_program_signals) (t, numsigs, program_signals);
2642 return;
2643 }
2644 }
2645 }
2646
2647 /* Look through the list of possible targets for a target that can
2648 follow forks. */
2649
2650 int
2651 target_follow_fork (int follow_child, int detach_fork)
2652 {
2653 struct target_ops *t;
2654
2655 for (t = current_target.beneath; t != NULL; t = t->beneath)
2656 {
2657 if (t->to_follow_fork != NULL)
2658 {
2659 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2660
2661 if (targetdebug)
2662 fprintf_unfiltered (gdb_stdlog,
2663 "target_follow_fork (%d, %d) = %d\n",
2664 follow_child, detach_fork, retval);
2665 return retval;
2666 }
2667 }
2668
2669 /* Some target returned a fork event, but did not know how to follow it. */
2670 internal_error (__FILE__, __LINE__,
2671 _("could not find a target to follow fork"));
2672 }
2673
2674 void
2675 target_mourn_inferior (void)
2676 {
2677 struct target_ops *t;
2678
2679 for (t = current_target.beneath; t != NULL; t = t->beneath)
2680 {
2681 if (t->to_mourn_inferior != NULL)
2682 {
2683 t->to_mourn_inferior (t);
2684 if (targetdebug)
2685 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2686
2687 /* We no longer need to keep handles on any of the object files.
2688 Make sure to release them to avoid unnecessarily locking any
2689 of them while we're not actually debugging. */
2690 bfd_cache_close_all ();
2691
2692 return;
2693 }
2694 }
2695
2696 internal_error (__FILE__, __LINE__,
2697 _("could not find a target to follow mourn inferior"));
2698 }
2699
2700 /* Look for a target which can describe architectural features, starting
2701 from TARGET. If we find one, return its description. */
2702
2703 const struct target_desc *
2704 target_read_description (struct target_ops *target)
2705 {
2706 struct target_ops *t;
2707
2708 for (t = target; t != NULL; t = t->beneath)
2709 if (t->to_read_description != NULL)
2710 {
2711 const struct target_desc *tdesc;
2712
2713 tdesc = t->to_read_description (t);
2714 if (tdesc)
2715 return tdesc;
2716 }
2717
2718 return NULL;
2719 }
2720
2721 /* The default implementation of to_search_memory.
2722 This implements a basic search of memory, reading target memory and
2723 performing the search here (as opposed to performing the search on the
2724 target side with, for example, gdbserver). */
2725
2726 int
2727 simple_search_memory (struct target_ops *ops,
2728 CORE_ADDR start_addr, ULONGEST search_space_len,
2729 const gdb_byte *pattern, ULONGEST pattern_len,
2730 CORE_ADDR *found_addrp)
2731 {
2732 /* NOTE: also defined in find.c testcase. */
2733 #define SEARCH_CHUNK_SIZE 16000
2734 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2735 /* Buffer to hold memory contents for searching. */
2736 gdb_byte *search_buf;
2737 unsigned search_buf_size;
2738 struct cleanup *old_cleanups;
2739
2740 search_buf_size = chunk_size + pattern_len - 1;
2741
2742 /* No point in trying to allocate a buffer larger than the search space. */
2743 if (search_space_len < search_buf_size)
2744 search_buf_size = search_space_len;
2745
2746 search_buf = malloc (search_buf_size);
2747 if (search_buf == NULL)
2748 error (_("Unable to allocate memory to perform the search."));
2749 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2750
2751 /* Prime the search buffer. */
2752
2753 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2754 search_buf, start_addr, search_buf_size) != search_buf_size)
2755 {
2756 warning (_("Unable to access %s bytes of target "
2757 "memory at %s, halting search."),
2758 pulongest (search_buf_size), hex_string (start_addr));
2759 do_cleanups (old_cleanups);
2760 return -1;
2761 }
2762
2763 /* Perform the search.
2764
2765 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2766 When we've scanned N bytes we copy the trailing bytes to the start and
2767 read in another N bytes. */
2768
2769 while (search_space_len >= pattern_len)
2770 {
2771 gdb_byte *found_ptr;
2772 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2773
2774 found_ptr = memmem (search_buf, nr_search_bytes,
2775 pattern, pattern_len);
2776
2777 if (found_ptr != NULL)
2778 {
2779 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2780
2781 *found_addrp = found_addr;
2782 do_cleanups (old_cleanups);
2783 return 1;
2784 }
2785
2786 /* Not found in this chunk, skip to next chunk. */
2787
2788 /* Don't let search_space_len wrap here, it's unsigned. */
2789 if (search_space_len >= chunk_size)
2790 search_space_len -= chunk_size;
2791 else
2792 search_space_len = 0;
2793
2794 if (search_space_len >= pattern_len)
2795 {
2796 unsigned keep_len = search_buf_size - chunk_size;
2797 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2798 int nr_to_read;
2799
2800 /* Copy the trailing part of the previous iteration to the front
2801 of the buffer for the next iteration. */
2802 gdb_assert (keep_len == pattern_len - 1);
2803 memcpy (search_buf, search_buf + chunk_size, keep_len);
2804
2805 nr_to_read = min (search_space_len - keep_len, chunk_size);
2806
2807 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2808 search_buf + keep_len, read_addr,
2809 nr_to_read) != nr_to_read)
2810 {
2811 warning (_("Unable to access %s bytes of target "
2812 "memory at %s, halting search."),
2813 plongest (nr_to_read),
2814 hex_string (read_addr));
2815 do_cleanups (old_cleanups);
2816 return -1;
2817 }
2818
2819 start_addr += chunk_size;
2820 }
2821 }
2822
2823 /* Not found. */
2824
2825 do_cleanups (old_cleanups);
2826 return 0;
2827 }
2828
2829 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2830 sequence of bytes in PATTERN with length PATTERN_LEN.
2831
2832 The result is 1 if found, 0 if not found, and -1 if there was an error
2833 requiring halting of the search (e.g. memory read error).
2834 If the pattern is found the address is recorded in FOUND_ADDRP. */
2835
2836 int
2837 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2838 const gdb_byte *pattern, ULONGEST pattern_len,
2839 CORE_ADDR *found_addrp)
2840 {
2841 struct target_ops *t;
2842 int found;
2843
2844 /* We don't use INHERIT to set current_target.to_search_memory,
2845 so we have to scan the target stack and handle targetdebug
2846 ourselves. */
2847
2848 if (targetdebug)
2849 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2850 hex_string (start_addr));
2851
2852 for (t = current_target.beneath; t != NULL; t = t->beneath)
2853 if (t->to_search_memory != NULL)
2854 break;
2855
2856 if (t != NULL)
2857 {
2858 found = t->to_search_memory (t, start_addr, search_space_len,
2859 pattern, pattern_len, found_addrp);
2860 }
2861 else
2862 {
2863 /* If a special version of to_search_memory isn't available, use the
2864 simple version. */
2865 found = simple_search_memory (current_target.beneath,
2866 start_addr, search_space_len,
2867 pattern, pattern_len, found_addrp);
2868 }
2869
2870 if (targetdebug)
2871 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2872
2873 return found;
2874 }
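
/* Illustrative sketch (added, not original): a hypothetical caller of
   target_search_memory acting on the three documented return values.
   START and SPACE_LEN are placeholders.

     static const gdb_byte pattern[] = { 0xde, 0xad, 0xbe, 0xef };
     CORE_ADDR found;
     int rc = target_search_memory (start, space_len,
                                    pattern, sizeof (pattern), &found);

     if (rc > 0)
       printf_unfiltered ("found at %s\n", hex_string (found));
     else if (rc == 0)
       printf_unfiltered ("pattern not found\n");

   A negative return means the search itself already issued a warning.  */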
2875
2876 /* Look through the currently pushed targets. If none of them will
2877 be able to restart the currently running process, issue an error
2878 message. */
2879
2880 void
2881 target_require_runnable (void)
2882 {
2883 struct target_ops *t;
2884
2885 for (t = target_stack; t != NULL; t = t->beneath)
2886 {
2887 /* If this target knows how to create a new program, then
2888 assume we will still be able to after killing the current
2889 one. Either killing and mourning will not pop T, or else
2890 find_default_run_target will find it again. */
2891 if (t->to_create_inferior != NULL)
2892 return;
2893
2894 /* Do not worry about thread_stratum targets that can not
2895 create inferiors. Assume they will be pushed again if
2896 necessary, and continue to the process_stratum. */
2897 if (t->to_stratum == thread_stratum
2898 || t->to_stratum == arch_stratum)
2899 continue;
2900
2901 error (_("The \"%s\" target does not support \"run\". "
2902 "Try \"help target\" or \"continue\"."),
2903 t->to_shortname);
2904 }
2905
2906 /* This function is only called if the target is running. In that
2907 case there should have been a process_stratum target and it
2908 should either know how to create inferiors, or not... */
2909 internal_error (__FILE__, __LINE__, _("No targets found"));
2910 }
2911
2912 /* Look through the list of possible targets for a target that can
2913 execute a run or attach command without any other data. This is
2914 used to locate the default process stratum.
2915
2916 If DO_MESG is not NULL, the result is always valid (error() is
2917 called for errors); else, return NULL on error. */
2918
2919 static struct target_ops *
2920 find_default_run_target (char *do_mesg)
2921 {
2922 struct target_ops **t;
2923 struct target_ops *runable = NULL;
2924 int count;
2925
2926 count = 0;
2927
2928 for (t = target_structs; t < target_structs + target_struct_size;
2929 ++t)
2930 {
2931 if ((*t)->to_can_run && target_can_run (*t))
2932 {
2933 runable = *t;
2934 ++count;
2935 }
2936 }
2937
2938 if (count != 1)
2939 {
2940 if (do_mesg)
2941 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2942 else
2943 return NULL;
2944 }
2945
2946 return runable;
2947 }
2948
2949 void
2950 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2951 {
2952 struct target_ops *t;
2953
2954 t = find_default_run_target ("attach");
2955 (t->to_attach) (t, args, from_tty);
2956 return;
2957 }
2958
2959 void
2960 find_default_create_inferior (struct target_ops *ops,
2961 char *exec_file, char *allargs, char **env,
2962 int from_tty)
2963 {
2964 struct target_ops *t;
2965
2966 t = find_default_run_target ("run");
2967 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2968 return;
2969 }
2970
2971 static int
2972 find_default_can_async_p (struct target_ops *ignore)
2973 {
2974 struct target_ops *t;
2975
2976 /* This may be called before the target is pushed on the stack;
2977 look for the default process stratum. If there's none, gdb isn't
2978 configured with a native debugger, and target remote isn't
2979 connected yet. */
2980 t = find_default_run_target (NULL);
2981 if (t && t->to_can_async_p != delegate_can_async_p)
2982 return (t->to_can_async_p) (t);
2983 return 0;
2984 }
2985
2986 static int
2987 find_default_is_async_p (struct target_ops *ignore)
2988 {
2989 struct target_ops *t;
2990
2991 /* This may be called before the target is pushed on the stack;
2992 look for the default process stratum. If there's none, gdb isn't
2993 configured with a native debugger, and target remote isn't
2994 connected yet. */
2995 t = find_default_run_target (NULL);
2996 if (t && t->to_is_async_p != delegate_is_async_p)
2997 return (t->to_is_async_p) (t);
2998 return 0;
2999 }
3000
3001 static int
3002 find_default_supports_non_stop (struct target_ops *self)
3003 {
3004 struct target_ops *t;
3005
3006 t = find_default_run_target (NULL);
3007 if (t && t->to_supports_non_stop)
3008 return (t->to_supports_non_stop) (t);
3009 return 0;
3010 }
3011
3012 int
3013 target_supports_non_stop (void)
3014 {
3015 struct target_ops *t;
3016
3017 for (t = &current_target; t != NULL; t = t->beneath)
3018 if (t->to_supports_non_stop)
3019 return t->to_supports_non_stop (t);
3020
3021 return 0;
3022 }
3023
3024 /* Implement the "info proc" command. */
3025
3026 int
3027 target_info_proc (char *args, enum info_proc_what what)
3028 {
3029 struct target_ops *t;
3030
3031 /* If we're already connected to something that can get us OS
3032 related data, use it. Otherwise, try using the native
3033 target. */
3034 if (current_target.to_stratum >= process_stratum)
3035 t = current_target.beneath;
3036 else
3037 t = find_default_run_target (NULL);
3038
3039 for (; t != NULL; t = t->beneath)
3040 {
3041 if (t->to_info_proc != NULL)
3042 {
3043 t->to_info_proc (t, args, what);
3044
3045 if (targetdebug)
3046 fprintf_unfiltered (gdb_stdlog,
3047 "target_info_proc (\"%s\", %d)\n", args, what);
3048
3049 return 1;
3050 }
3051 }
3052
3053 return 0;
3054 }
3055
3056 static int
3057 find_default_supports_disable_randomization (struct target_ops *self)
3058 {
3059 struct target_ops *t;
3060
3061 t = find_default_run_target (NULL);
3062 if (t && t->to_supports_disable_randomization)
3063 return (t->to_supports_disable_randomization) (t);
3064 return 0;
3065 }
3066
3067 int
3068 target_supports_disable_randomization (void)
3069 {
3070 struct target_ops *t;
3071
3072 for (t = &current_target; t != NULL; t = t->beneath)
3073 if (t->to_supports_disable_randomization)
3074 return t->to_supports_disable_randomization (t);
3075
3076 return 0;
3077 }
3078
3079 char *
3080 target_get_osdata (const char *type)
3081 {
3082 struct target_ops *t;
3083
3084 /* If we're already connected to something that can get us OS
3085 related data, use it. Otherwise, try using the native
3086 target. */
3087 if (current_target.to_stratum >= process_stratum)
3088 t = current_target.beneath;
3089 else
3090 t = find_default_run_target ("get OS data");
3091
3092 if (!t)
3093 return NULL;
3094
3095 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3096 }
3097
3098 /* Determine the current address space of thread PTID. */
3099
3100 struct address_space *
3101 target_thread_address_space (ptid_t ptid)
3102 {
3103 struct address_space *aspace;
3104 struct inferior *inf;
3105 struct target_ops *t;
3106
3107 for (t = current_target.beneath; t != NULL; t = t->beneath)
3108 {
3109 if (t->to_thread_address_space != NULL)
3110 {
3111 aspace = t->to_thread_address_space (t, ptid);
3112 gdb_assert (aspace);
3113
3114 if (targetdebug)
3115 fprintf_unfiltered (gdb_stdlog,
3116 "target_thread_address_space (%s) = %d\n",
3117 target_pid_to_str (ptid),
3118 address_space_num (aspace));
3119 return aspace;
3120 }
3121 }
3122
3123 /* Fall-back to the "main" address space of the inferior. */
3124 inf = find_inferior_pid (ptid_get_pid (ptid));
3125
3126 if (inf == NULL || inf->aspace == NULL)
3127 internal_error (__FILE__, __LINE__,
3128 _("Can't determine the current "
3129 "address space of thread %s\n"),
3130 target_pid_to_str (ptid));
3131
3132 return inf->aspace;
3133 }
3134
3135
3136 /* Target file operations. */
3137
3138 static struct target_ops *
3139 default_fileio_target (void)
3140 {
3141 /* If we're already connected to something that can perform
3142 file I/O, use it. Otherwise, try using the native target. */
3143 if (current_target.to_stratum >= process_stratum)
3144 return current_target.beneath;
3145 else
3146 return find_default_run_target ("file I/O");
3147 }
3148
3149 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3150 target file descriptor, or -1 if an error occurs (and set
3151 *TARGET_ERRNO). */
3152 int
3153 target_fileio_open (const char *filename, int flags, int mode,
3154 int *target_errno)
3155 {
3156 struct target_ops *t;
3157
3158 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3159 {
3160 if (t->to_fileio_open != NULL)
3161 {
3162 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
3163
3164 if (targetdebug)
3165 fprintf_unfiltered (gdb_stdlog,
3166 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3167 filename, flags, mode,
3168 fd, fd != -1 ? 0 : *target_errno);
3169 return fd;
3170 }
3171 }
3172
3173 *target_errno = FILEIO_ENOSYS;
3174 return -1;
3175 }
3176
3177 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3178 Return the number of bytes written, or -1 if an error occurs
3179 (and set *TARGET_ERRNO). */
3180 int
3181 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3182 ULONGEST offset, int *target_errno)
3183 {
3184 struct target_ops *t;
3185
3186 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3187 {
3188 if (t->to_fileio_pwrite != NULL)
3189 {
3190 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
3191 target_errno);
3192
3193 if (targetdebug)
3194 fprintf_unfiltered (gdb_stdlog,
3195 "target_fileio_pwrite (%d,...,%d,%s) "
3196 "= %d (%d)\n",
3197 fd, len, pulongest (offset),
3198 ret, ret != -1 ? 0 : *target_errno);
3199 return ret;
3200 }
3201 }
3202
3203 *target_errno = FILEIO_ENOSYS;
3204 return -1;
3205 }
3206
3207 /* Read up to LEN bytes from FD on the target into READ_BUF.
3208 Return the number of bytes read, or -1 if an error occurs
3209 (and set *TARGET_ERRNO). */
3210 int
3211 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3212 ULONGEST offset, int *target_errno)
3213 {
3214 struct target_ops *t;
3215
3216 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3217 {
3218 if (t->to_fileio_pread != NULL)
3219 {
3220 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
3221 target_errno);
3222
3223 if (targetdebug)
3224 fprintf_unfiltered (gdb_stdlog,
3225 "target_fileio_pread (%d,...,%d,%s) "
3226 "= %d (%d)\n",
3227 fd, len, pulongest (offset),
3228 ret, ret != -1 ? 0 : *target_errno);
3229 return ret;
3230 }
3231 }
3232
3233 *target_errno = FILEIO_ENOSYS;
3234 return -1;
3235 }
3236
3237 /* Close FD on the target. Return 0, or -1 if an error occurs
3238 (and set *TARGET_ERRNO). */
3239 int
3240 target_fileio_close (int fd, int *target_errno)
3241 {
3242 struct target_ops *t;
3243
3244 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3245 {
3246 if (t->to_fileio_close != NULL)
3247 {
3248 int ret = t->to_fileio_close (t, fd, target_errno);
3249
3250 if (targetdebug)
3251 fprintf_unfiltered (gdb_stdlog,
3252 "target_fileio_close (%d) = %d (%d)\n",
3253 fd, ret, ret != -1 ? 0 : *target_errno);
3254 return ret;
3255 }
3256 }
3257
3258 *target_errno = FILEIO_ENOSYS;
3259 return -1;
3260 }
3261
3262 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3263 occurs (and set *TARGET_ERRNO). */
3264 int
3265 target_fileio_unlink (const char *filename, int *target_errno)
3266 {
3267 struct target_ops *t;
3268
3269 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3270 {
3271 if (t->to_fileio_unlink != NULL)
3272 {
3273 int ret = t->to_fileio_unlink (t, filename, target_errno);
3274
3275 if (targetdebug)
3276 fprintf_unfiltered (gdb_stdlog,
3277 "target_fileio_unlink (%s) = %d (%d)\n",
3278 filename, ret, ret != -1 ? 0 : *target_errno);
3279 return ret;
3280 }
3281 }
3282
3283 *target_errno = FILEIO_ENOSYS;
3284 return -1;
3285 }
3286
3287 /* Read value of symbolic link FILENAME on the target. Return a
3288 null-terminated string allocated via xmalloc, or NULL if an error
3289 occurs (and set *TARGET_ERRNO). */
3290 char *
3291 target_fileio_readlink (const char *filename, int *target_errno)
3292 {
3293 struct target_ops *t;
3294
3295 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3296 {
3297 if (t->to_fileio_readlink != NULL)
3298 {
3299 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3300
3301 if (targetdebug)
3302 fprintf_unfiltered (gdb_stdlog,
3303 "target_fileio_readlink (%s) = %s (%d)\n",
3304 filename, ret? ret : "(nil)",
3305 ret? 0 : *target_errno);
3306 return ret;
3307 }
3308 }
3309
3310 *target_errno = FILEIO_ENOSYS;
3311 return NULL;
3312 }
3313
3314 static void
3315 target_fileio_close_cleanup (void *opaque)
3316 {
3317 int fd = *(int *) opaque;
3318 int target_errno;
3319
3320 target_fileio_close (fd, &target_errno);
3321 }
3322
3323 /* Read target file FILENAME. Store the result in *BUF_P and
3324 return the size of the transferred data. PADDING additional bytes are
3325 available in *BUF_P. This is a helper function for
3326 target_fileio_read_alloc; see the declaration of that function for more
3327 information. */
3328
3329 static LONGEST
3330 target_fileio_read_alloc_1 (const char *filename,
3331 gdb_byte **buf_p, int padding)
3332 {
3333 struct cleanup *close_cleanup;
3334 size_t buf_alloc, buf_pos;
3335 gdb_byte *buf;
3336 LONGEST n;
3337 int fd;
3338 int target_errno;
3339
3340 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3341 if (fd == -1)
3342 return -1;
3343
3344 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3345
3346 /* Start by reading up to 4K at a time. The target will throttle
3347 this number down if necessary. */
3348 buf_alloc = 4096;
3349 buf = xmalloc (buf_alloc);
3350 buf_pos = 0;
3351 while (1)
3352 {
3353 n = target_fileio_pread (fd, &buf[buf_pos],
3354 buf_alloc - buf_pos - padding, buf_pos,
3355 &target_errno);
3356 if (n < 0)
3357 {
3358 /* An error occurred. */
3359 do_cleanups (close_cleanup);
3360 xfree (buf);
3361 return -1;
3362 }
3363 else if (n == 0)
3364 {
3365 /* Read all there was. */
3366 do_cleanups (close_cleanup);
3367 if (buf_pos == 0)
3368 xfree (buf);
3369 else
3370 *buf_p = buf;
3371 return buf_pos;
3372 }
3373
3374 buf_pos += n;
3375
3376 /* If the buffer is filling up, expand it. */
3377 if (buf_alloc < buf_pos * 2)
3378 {
3379 buf_alloc *= 2;
3380 buf = xrealloc (buf, buf_alloc);
3381 }
3382
3383 QUIT;
3384 }
3385 }
3386
3387 /* Read target file FILENAME. Store the result in *BUF_P and return
3388 the size of the transferred data. See the declaration in "target.h"
3389 function for more information about the return value. */
3390
3391 LONGEST
3392 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3393 {
3394 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3395 }
3396
3397 /* Read target file FILENAME. The result is NUL-terminated and
3398 returned as a string, allocated using xmalloc. If an error occurs
3399 or the transfer is unsupported, NULL is returned. Empty objects
3400 are returned as allocated but empty strings. A warning is issued
3401 if the result contains any embedded NUL bytes. */
3402
3403 char *
3404 target_fileio_read_stralloc (const char *filename)
3405 {
3406 gdb_byte *buffer;
3407 char *bufstr;
3408 LONGEST i, transferred;
3409
3410 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3411 bufstr = (char *) buffer;
3412
3413 if (transferred < 0)
3414 return NULL;
3415
3416 if (transferred == 0)
3417 return xstrdup ("");
3418
3419 bufstr[transferred] = 0;
3420
3421 /* Check for embedded NUL bytes; but allow trailing NULs. */
3422 for (i = strlen (bufstr); i < transferred; i++)
3423 if (bufstr[i] != 0)
3424 {
3425 warning (_("target file %s "
3426 "contained unexpected null characters"),
3427 filename);
3428 break;
3429 }
3430
3431 return bufstr;
3432 }
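
/* Illustrative sketch (added): reading a small target file by hand
   with the fileio primitives above, for a caller that does not want
   the whole-file helpers.  The filename is only an example.

     int target_errno;
     int fd = target_fileio_open ("/proc/version", FILEIO_O_RDONLY,
                                  0, &target_errno);
     if (fd != -1)
       {
         gdb_byte buf[128];
         int n = target_fileio_pread (fd, buf, sizeof (buf), 0,
                                      &target_errno);

         ...use the first N bytes of BUF...
         target_fileio_close (fd, &target_errno);
       }
*/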
3433
3434
3435 static int
3436 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3437 CORE_ADDR addr, int len)
3438 {
3439 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3440 }
3441
3442 static int
3443 default_watchpoint_addr_within_range (struct target_ops *target,
3444 CORE_ADDR addr,
3445 CORE_ADDR start, int length)
3446 {
3447 return addr >= start && addr < start + length;
3448 }
3449
3450 static struct gdbarch *
3451 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3452 {
3453 return target_gdbarch ();
3454 }
3455
3456 static int
3457 return_zero (void)
3458 {
3459 return 0;
3460 }
3461
3462 /* Find the next target down the stack from the specified target. */
3465
3466 struct target_ops *
3467 find_target_beneath (struct target_ops *t)
3468 {
3469 return t->beneath;
3470 }
3471
3472 /* See target.h. */
3473
3474 struct target_ops *
3475 find_target_at (enum strata stratum)
3476 {
3477 struct target_ops *t;
3478
3479 for (t = current_target.beneath; t != NULL; t = t->beneath)
3480 if (t->to_stratum == stratum)
3481 return t;
3482
3483 return NULL;
3484 }
3485
3486 \f
3487 /* The inferior process has died. Long live the inferior! */
3488
3489 void
3490 generic_mourn_inferior (void)
3491 {
3492 ptid_t ptid;
3493
3494 ptid = inferior_ptid;
3495 inferior_ptid = null_ptid;
3496
3497 /* Mark breakpoints uninserted in case something tries to delete a
3498 breakpoint while we delete the inferior's threads (which would
3499 fail, since the inferior is long gone). */
3500 mark_breakpoints_out ();
3501
3502 if (!ptid_equal (ptid, null_ptid))
3503 {
3504 int pid = ptid_get_pid (ptid);
3505 exit_inferior (pid);
3506 }
3507
3508 /* Note this wipes step-resume breakpoints, so needs to be done
3509 after exit_inferior, which ends up referencing the step-resume
3510 breakpoints through clear_thread_inferior_resources. */
3511 breakpoint_init_inferior (inf_exited);
3512
3513 registers_changed ();
3514
3515 reopen_exec_file ();
3516 reinit_frame_cache ();
3517
3518 if (deprecated_detach_hook)
3519 deprecated_detach_hook ();
3520 }
3521 \f
3522 /* Convert a normal process ID to a string. Returns the string in a
3523 static buffer. */
3524
3525 char *
3526 normal_pid_to_str (ptid_t ptid)
3527 {
3528 static char buf[32];
3529
3530 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3531 return buf;
3532 }
3533
3534 static char *
3535 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3536 {
3537 return normal_pid_to_str (ptid);
3538 }
3539
3540 /* Error-catcher for target_find_memory_regions. */
3541 static int
3542 dummy_find_memory_regions (struct target_ops *self,
3543 find_memory_region_ftype ignore1, void *ignore2)
3544 {
3545 error (_("Command not implemented for this target."));
3546 return 0;
3547 }
3548
3549 /* Error-catcher for target_make_corefile_notes. */
3550 static char *
3551 dummy_make_corefile_notes (struct target_ops *self,
3552 bfd *ignore1, int *ignore2)
3553 {
3554 error (_("Command not implemented for this target."));
3555 return NULL;
3556 }
3557
3558 /* Set up the handful of non-empty slots needed by the dummy target
3559 vector. */
3560
3561 static void
3562 init_dummy_target (void)
3563 {
3564 dummy_target.to_shortname = "None";
3565 dummy_target.to_longname = "None";
3566 dummy_target.to_doc = "";
3567 dummy_target.to_create_inferior = find_default_create_inferior;
3568 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3569 dummy_target.to_supports_disable_randomization
3570 = find_default_supports_disable_randomization;
3571 dummy_target.to_pid_to_str = dummy_pid_to_str;
3572 dummy_target.to_stratum = dummy_stratum;
3573 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3574 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3575 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3576 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3577 dummy_target.to_has_execution
3578 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3579 dummy_target.to_magic = OPS_MAGIC;
3580
3581 install_dummy_methods (&dummy_target);
3582 }
3583 \f
3584 static void
3585 debug_to_open (char *args, int from_tty)
3586 {
3587 debug_target.to_open (args, from_tty);
3588
3589 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3590 }
3591
3592 void
3593 target_close (struct target_ops *targ)
3594 {
3595 gdb_assert (!target_is_pushed (targ));
3596
3597 if (targ->to_xclose != NULL)
3598 targ->to_xclose (targ);
3599 else if (targ->to_close != NULL)
3600 targ->to_close (targ);
3601
3602 if (targetdebug)
3603 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3604 }
3605
3606 void
3607 target_attach (char *args, int from_tty)
3608 {
3609 current_target.to_attach (&current_target, args, from_tty);
3610 if (targetdebug)
3611 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3612 args, from_tty);
3613 }
3614
3615 int
3616 target_thread_alive (ptid_t ptid)
3617 {
3618 struct target_ops *t;
3619
3620 for (t = current_target.beneath; t != NULL; t = t->beneath)
3621 {
3622 if (t->to_thread_alive != NULL)
3623 {
3624 int retval;
3625
3626 retval = t->to_thread_alive (t, ptid);
3627 if (targetdebug)
3628 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3629 ptid_get_pid (ptid), retval);
3630
3631 return retval;
3632 }
3633 }
3634
3635 return 0;
3636 }
3637
3638 void
3639 target_find_new_threads (void)
3640 {
3641 struct target_ops *t;
3642
3643 for (t = current_target.beneath; t != NULL; t = t->beneath)
3644 {
3645 if (t->to_find_new_threads != NULL)
3646 {
3647 t->to_find_new_threads (t);
3648 if (targetdebug)
3649 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3650
3651 return;
3652 }
3653 }
3654 }
3655
3656 void
3657 target_stop (ptid_t ptid)
3658 {
3659 if (!may_stop)
3660 {
3661 warning (_("May not interrupt or stop the target, ignoring attempt"));
3662 return;
3663 }
3664
3665 (*current_target.to_stop) (&current_target, ptid);
3666 }
3667
3668 static void
3669 debug_to_post_attach (struct target_ops *self, int pid)
3670 {
3671 debug_target.to_post_attach (&debug_target, pid);
3672
3673 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3674 }
3675
3676 /* Concatenate ELEM to LIST, a comma-separated list, and return the
3677 result. The incoming LIST argument is released. */
3678
3679 static char *
3680 str_comma_list_concat_elem (char *list, const char *elem)
3681 {
3682 if (list == NULL)
3683 return xstrdup (elem);
3684 else
3685 return reconcat (list, list, ", ", elem, (char *) NULL);
3686 }
3687
3688 /* Helper for target_options_to_string. If OPT is present in
3689 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3690 Returns the new resulting string. OPT is removed from
3691 TARGET_OPTIONS. */
3692
3693 static char *
3694 do_option (int *target_options, char *ret,
3695 int opt, char *opt_str)
3696 {
3697 if ((*target_options & opt) != 0)
3698 {
3699 ret = str_comma_list_concat_elem (ret, opt_str);
3700 *target_options &= ~opt;
3701 }
3702
3703 return ret;
3704 }
3705
3706 char *
3707 target_options_to_string (int target_options)
3708 {
3709 char *ret = NULL;
3710
3711 #define DO_TARG_OPTION(OPT) \
3712 ret = do_option (&target_options, ret, OPT, #OPT)
3713
3714 DO_TARG_OPTION (TARGET_WNOHANG);
3715
3716 if (target_options != 0)
3717 ret = str_comma_list_concat_elem (ret, "unknown???");
3718
3719 if (ret == NULL)
3720 ret = xstrdup ("");
3721 return ret;
3722 }
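
/* Worked example (added): with the single option handled above,
   target_options_to_string (TARGET_WNOHANG) yields "TARGET_WNOHANG",
   any unrecognized bit appends "unknown???", and 0 yields an empty
   string.  The caller owns the result and must xfree it, as
   target_wait above does.  */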
3723
3724 static void
3725 debug_print_register (const char * func,
3726 struct regcache *regcache, int regno)
3727 {
3728 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3729
3730 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3731 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3732 && gdbarch_register_name (gdbarch, regno) != NULL
3733 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3734 fprintf_unfiltered (gdb_stdlog, "(%s)",
3735 gdbarch_register_name (gdbarch, regno));
3736 else
3737 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3738 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3739 {
3740 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3741 int i, size = register_size (gdbarch, regno);
3742 gdb_byte buf[MAX_REGISTER_SIZE];
3743
3744 regcache_raw_collect (regcache, regno, buf);
3745 fprintf_unfiltered (gdb_stdlog, " = ");
3746 for (i = 0; i < size; i++)
3747 {
3748 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3749 }
3750 if (size <= sizeof (LONGEST))
3751 {
3752 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3753
3754 fprintf_unfiltered (gdb_stdlog, " %s %s",
3755 core_addr_to_string_nz (val), plongest (val));
3756 }
3757 }
3758 fprintf_unfiltered (gdb_stdlog, "\n");
3759 }
3760
3761 void
3762 target_fetch_registers (struct regcache *regcache, int regno)
3763 {
3764 struct target_ops *t;
3765
3766 for (t = current_target.beneath; t != NULL; t = t->beneath)
3767 {
3768 if (t->to_fetch_registers != NULL)
3769 {
3770 t->to_fetch_registers (t, regcache, regno);
3771 if (targetdebug)
3772 debug_print_register ("target_fetch_registers", regcache, regno);
3773 return;
3774 }
3775 }
3776 }
3777
3778 void
3779 target_store_registers (struct regcache *regcache, int regno)
3780 {
3781 struct target_ops *t;
3782
3783 if (!may_write_registers)
3784 error (_("Writing to registers is not allowed (regno %d)"), regno);
3785
3786 current_target.to_store_registers (&current_target, regcache, regno);
3787 if (targetdebug)
3788 {
3789 debug_print_register ("target_store_registers", regcache, regno);
3790 }
3791 }
3792
3793 int
3794 target_core_of_thread (ptid_t ptid)
3795 {
3796 struct target_ops *t;
3797
3798 for (t = current_target.beneath; t != NULL; t = t->beneath)
3799 {
3800 if (t->to_core_of_thread != NULL)
3801 {
3802 int retval = t->to_core_of_thread (t, ptid);
3803
3804 if (targetdebug)
3805 fprintf_unfiltered (gdb_stdlog,
3806 "target_core_of_thread (%d) = %d\n",
3807 ptid_get_pid (ptid), retval);
3808 return retval;
3809 }
3810 }
3811
3812 return -1;
3813 }
3814
3815 int
3816 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3817 {
3818 struct target_ops *t;
3819
3820 for (t = current_target.beneath; t != NULL; t = t->beneath)
3821 {
3822 if (t->to_verify_memory != NULL)
3823 {
3824 int retval = t->to_verify_memory (t, data, memaddr, size);
3825
3826 if (targetdebug)
3827 fprintf_unfiltered (gdb_stdlog,
3828 "target_verify_memory (%s, %s) = %d\n",
3829 paddress (target_gdbarch (), memaddr),
3830 pulongest (size),
3831 retval);
3832 return retval;
3833 }
3834 }
3835
3836 tcomplain ();
3837 }
3838
3839 /* The documentation for this function is in its prototype declaration in
3840 target.h. */
3841
3842 int
3843 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3844 {
3845 struct target_ops *t;
3846
3847 for (t = current_target.beneath; t != NULL; t = t->beneath)
3848 if (t->to_insert_mask_watchpoint != NULL)
3849 {
3850 int ret;
3851
3852 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
3853
3854 if (targetdebug)
3855 fprintf_unfiltered (gdb_stdlog, "\
3856 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3857 core_addr_to_string (addr),
3858 core_addr_to_string (mask), rw, ret);
3859
3860 return ret;
3861 }
3862
3863 return 1;
3864 }
3865
3866 /* The documentation for this function is in its prototype declaration in
3867 target.h. */
3868
3869 int
3870 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3871 {
3872 struct target_ops *t;
3873
3874 for (t = current_target.beneath; t != NULL; t = t->beneath)
3875 if (t->to_remove_mask_watchpoint != NULL)
3876 {
3877 int ret;
3878
3879 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
3880
3881 if (targetdebug)
3882 fprintf_unfiltered (gdb_stdlog, "\
3883 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
3884 core_addr_to_string (addr),
3885 core_addr_to_string (mask), rw, ret);
3886
3887 return ret;
3888 }
3889
3890 return 1;
3891 }
3892
3893 /* The documentation for this function is in its prototype declaration
3894 in target.h. */
3895
3896 int
3897 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
3898 {
3899 struct target_ops *t;
3900
3901 for (t = current_target.beneath; t != NULL; t = t->beneath)
3902 if (t->to_masked_watch_num_registers != NULL)
3903 return t->to_masked_watch_num_registers (t, addr, mask);
3904
3905 return -1;
3906 }
3907
3908 /* The documentation for this function is in its prototype declaration
3909 in target.h. */
3910
3911 int
3912 target_ranged_break_num_registers (void)
3913 {
3914 struct target_ops *t;
3915
3916 for (t = current_target.beneath; t != NULL; t = t->beneath)
3917 if (t->to_ranged_break_num_registers != NULL)
3918 return t->to_ranged_break_num_registers (t);
3919
3920 return -1;
3921 }
3922
3923 /* See target.h. */
3924
3925 struct btrace_target_info *
3926 target_enable_btrace (ptid_t ptid)
3927 {
3928 struct target_ops *t;
3929
3930 for (t = current_target.beneath; t != NULL; t = t->beneath)
3931 if (t->to_enable_btrace != NULL)
3932 return t->to_enable_btrace (t, ptid);
3933
3934 tcomplain ();
3935 return NULL;
3936 }
3937
3938 /* See target.h. */
3939
3940 void
3941 target_disable_btrace (struct btrace_target_info *btinfo)
3942 {
3943 struct target_ops *t;
3944
3945 for (t = current_target.beneath; t != NULL; t = t->beneath)
3946 if (t->to_disable_btrace != NULL)
3947 {
3948 t->to_disable_btrace (t, btinfo);
3949 return;
3950 }
3951
3952 tcomplain ();
3953 }
3954
3955 /* See target.h. */
3956
3957 void
3958 target_teardown_btrace (struct btrace_target_info *btinfo)
3959 {
3960 struct target_ops *t;
3961
3962 for (t = current_target.beneath; t != NULL; t = t->beneath)
3963 if (t->to_teardown_btrace != NULL)
3964 {
3965 t->to_teardown_btrace (t, btinfo);
3966 return;
3967 }
3968
3969 tcomplain ();
3970 }
3971
3972 /* See target.h. */
3973
3974 enum btrace_error
3975 target_read_btrace (VEC (btrace_block_s) **btrace,
3976 struct btrace_target_info *btinfo,
3977 enum btrace_read_type type)
3978 {
3979 struct target_ops *t;
3980
3981 for (t = current_target.beneath; t != NULL; t = t->beneath)
3982 if (t->to_read_btrace != NULL)
3983 return t->to_read_btrace (t, btrace, btinfo, type);
3984
3985 tcomplain ();
3986 return BTRACE_ERR_NOT_SUPPORTED;
3987 }
3988
3989 /* See target.h. */
3990
3991 void
3992 target_stop_recording (void)
3993 {
3994 struct target_ops *t;
3995
3996 for (t = current_target.beneath; t != NULL; t = t->beneath)
3997 if (t->to_stop_recording != NULL)
3998 {
3999 t->to_stop_recording (t);
4000 return;
4001 }
4002
4003 /* This is optional. */
4004 }
4005
4006 /* See target.h. */
4007
4008 void
4009 target_info_record (void)
4010 {
4011 struct target_ops *t;
4012
4013 for (t = current_target.beneath; t != NULL; t = t->beneath)
4014 if (t->to_info_record != NULL)
4015 {
4016 t->to_info_record (t);
4017 return;
4018 }
4019
4020 tcomplain ();
4021 }
4022
4023 /* See target.h. */
4024
4025 void
4026 target_save_record (const char *filename)
4027 {
4028 struct target_ops *t;
4029
4030 for (t = current_target.beneath; t != NULL; t = t->beneath)
4031 if (t->to_save_record != NULL)
4032 {
4033 t->to_save_record (t, filename);
4034 return;
4035 }
4036
4037 tcomplain ();
4038 }
4039
4040 /* See target.h. */
4041
4042 int
4043 target_supports_delete_record (void)
4044 {
4045 struct target_ops *t;
4046
4047 for (t = current_target.beneath; t != NULL; t = t->beneath)
4048 if (t->to_delete_record != NULL)
4049 return 1;
4050
4051 return 0;
4052 }
4053
4054 /* See target.h. */
4055
4056 void
4057 target_delete_record (void)
4058 {
4059 struct target_ops *t;
4060
4061 for (t = current_target.beneath; t != NULL; t = t->beneath)
4062 if (t->to_delete_record != NULL)
4063 {
4064 t->to_delete_record (t);
4065 return;
4066 }
4067
4068 tcomplain ();
4069 }
4070
4071 /* See target.h. */
4072
4073 int
4074 target_record_is_replaying (void)
4075 {
4076 struct target_ops *t;
4077
4078 for (t = current_target.beneath; t != NULL; t = t->beneath)
4079 if (t->to_record_is_replaying != NULL)
4080 return t->to_record_is_replaying (t);
4081
4082 return 0;
4083 }
4084
4085 /* See target.h. */
4086
4087 void
4088 target_goto_record_begin (void)
4089 {
4090 struct target_ops *t;
4091
4092 for (t = current_target.beneath; t != NULL; t = t->beneath)
4093 if (t->to_goto_record_begin != NULL)
4094 {
4095 t->to_goto_record_begin (t);
4096 return;
4097 }
4098
4099 tcomplain ();
4100 }
4101
4102 /* See target.h. */
4103
4104 void
4105 target_goto_record_end (void)
4106 {
4107 struct target_ops *t;
4108
4109 for (t = current_target.beneath; t != NULL; t = t->beneath)
4110 if (t->to_goto_record_end != NULL)
4111 {
4112 t->to_goto_record_end (t);
4113 return;
4114 }
4115
4116 tcomplain ();
4117 }
4118
4119 /* See target.h. */
4120
4121 void
4122 target_goto_record (ULONGEST insn)
4123 {
4124 struct target_ops *t;
4125
4126 for (t = current_target.beneath; t != NULL; t = t->beneath)
4127 if (t->to_goto_record != NULL)
4128 {
4129 t->to_goto_record (t, insn);
4130 return;
4131 }
4132
4133 tcomplain ();
4134 }
4135
4136 /* See target.h. */
4137
4138 void
4139 target_insn_history (int size, int flags)
4140 {
4141 struct target_ops *t;
4142
4143 for (t = current_target.beneath; t != NULL; t = t->beneath)
4144 if (t->to_insn_history != NULL)
4145 {
4146 t->to_insn_history (t, size, flags);
4147 return;
4148 }
4149
4150 tcomplain ();
4151 }
4152
4153 /* See target.h. */
4154
4155 void
4156 target_insn_history_from (ULONGEST from, int size, int flags)
4157 {
4158 struct target_ops *t;
4159
4160 for (t = current_target.beneath; t != NULL; t = t->beneath)
4161 if (t->to_insn_history_from != NULL)
4162 {
4163 t->to_insn_history_from (t, from, size, flags);
4164 return;
4165 }
4166
4167 tcomplain ();
4168 }
4169
4170 /* See target.h. */
4171
4172 void
4173 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4174 {
4175 struct target_ops *t;
4176
4177 for (t = current_target.beneath; t != NULL; t = t->beneath)
4178 if (t->to_insn_history_range != NULL)
4179 {
4180 t->to_insn_history_range (t, begin, end, flags);
4181 return;
4182 }
4183
4184 tcomplain ();
4185 }
4186
4187 /* See target.h. */
4188
4189 void
4190 target_call_history (int size, int flags)
4191 {
4192 struct target_ops *t;
4193
4194 for (t = current_target.beneath; t != NULL; t = t->beneath)
4195 if (t->to_call_history != NULL)
4196 {
4197 t->to_call_history (t, size, flags);
4198 return;
4199 }
4200
4201 tcomplain ();
4202 }
4203
4204 /* See target.h. */
4205
4206 void
4207 target_call_history_from (ULONGEST begin, int size, int flags)
4208 {
4209 struct target_ops *t;
4210
4211 for (t = current_target.beneath; t != NULL; t = t->beneath)
4212 if (t->to_call_history_from != NULL)
4213 {
4214 t->to_call_history_from (t, begin, size, flags);
4215 return;
4216 }
4217
4218 tcomplain ();
4219 }
4220
4221 /* See target.h. */
4222
4223 void
4224 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4225 {
4226 struct target_ops *t;
4227
4228 for (t = current_target.beneath; t != NULL; t = t->beneath)
4229 if (t->to_call_history_range != NULL)
4230 {
4231 t->to_call_history_range (t, begin, end, flags);
4232 return;
4233 }
4234
4235 tcomplain ();
4236 }
4237
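/* The debug_to_* wrappers below (like their siblings earlier in this
   file) call the corresponding method of debug_target -- the real
   target vtable saved by setup_target_debug -- and trace the call and
   its result to gdb_stdlog.  */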
4238 static void
4239 debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
4240 {
4241 debug_target.to_prepare_to_store (&debug_target, regcache);
4242
4243 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4244 }
4245
4246 /* See target.h. */
4247
4248 const struct frame_unwind *
4249 target_get_unwinder (void)
4250 {
4251 struct target_ops *t;
4252
4253 for (t = current_target.beneath; t != NULL; t = t->beneath)
4254 if (t->to_get_unwinder != NULL)
4255 return t->to_get_unwinder;
4256
4257 return NULL;
4258 }
4259
4260 /* See target.h. */
4261
4262 const struct frame_unwind *
4263 target_get_tailcall_unwinder (void)
4264 {
4265 struct target_ops *t;
4266
4267 for (t = current_target.beneath; t != NULL; t = t->beneath)
4268 if (t->to_get_tailcall_unwinder != NULL)
4269 return t->to_get_tailcall_unwinder;
4270
4271 return NULL;
4272 }
4273
4274 /* See target.h. */
4275
4276 CORE_ADDR
4277 forward_target_decr_pc_after_break (struct target_ops *ops,
4278 struct gdbarch *gdbarch)
4279 {
4280 for (; ops != NULL; ops = ops->beneath)
4281 if (ops->to_decr_pc_after_break != NULL)
4282 return ops->to_decr_pc_after_break (ops, gdbarch);
4283
4284 return gdbarch_decr_pc_after_break (gdbarch);
4285 }
4286
4287 /* See target.h. */
4288
4289 CORE_ADDR
4290 target_decr_pc_after_break (struct gdbarch *gdbarch)
4291 {
4292 return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
4293 }
4294
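/* Debug wrapper for the deprecated_xfer_memory method: perform the
   transfer through debug_target and dump the result, plus the
   transferred bytes themselves (abbreviated unless "set debug target"
   is 2 or higher), to gdb_stdlog.  */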
4295 static int
4296 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4297 int write, struct mem_attrib *attrib,
4298 struct target_ops *target)
4299 {
4300 int retval;
4301
4302 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4303 attrib, target);
4304
4305 fprintf_unfiltered (gdb_stdlog,
4306 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4307 paddress (target_gdbarch (), memaddr), len,
4308 write ? "write" : "read", retval);
4309
4310 if (retval > 0)
4311 {
4312 int i;
4313
4314 fputs_unfiltered (", bytes =", gdb_stdlog);
4315 for (i = 0; i < retval; i++)
4316 {
4317 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4318 {
4319 if (targetdebug < 2 && i > 0)
4320 {
4321 fprintf_unfiltered (gdb_stdlog, " ...");
4322 break;
4323 }
4324 fprintf_unfiltered (gdb_stdlog, "\n");
4325 }
4326
4327 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4328 }
4329 }
4330
4331 fputc_unfiltered ('\n', gdb_stdlog);
4332
4333 return retval;
4334 }
4335
4336 static void
4337 debug_to_files_info (struct target_ops *target)
4338 {
4339 debug_target.to_files_info (target);
4340
4341 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4342 }
4343
4344 static int
4345 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4346 struct bp_target_info *bp_tgt)
4347 {
4348 int retval;
4349
4350 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4351
4352 fprintf_unfiltered (gdb_stdlog,
4353 "target_insert_breakpoint (%s, xxx) = %ld\n",
4354 core_addr_to_string (bp_tgt->placed_address),
4355 (unsigned long) retval);
4356 return retval;
4357 }
4358
4359 static int
4360 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4361 struct bp_target_info *bp_tgt)
4362 {
4363 int retval;
4364
4365 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4366
4367 fprintf_unfiltered (gdb_stdlog,
4368 "target_remove_breakpoint (%s, xxx) = %ld\n",
4369 core_addr_to_string (bp_tgt->placed_address),
4370 (unsigned long) retval);
4371 return retval;
4372 }
4373
4374 static int
4375 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4376 int type, int cnt, int from_tty)
4377 {
4378 int retval;
4379
4380 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4381 type, cnt, from_tty);
4382
4383 fprintf_unfiltered (gdb_stdlog,
4384 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4385 (unsigned long) type,
4386 (unsigned long) cnt,
4387 (unsigned long) from_tty,
4388 (unsigned long) retval);
4389 return retval;
4390 }
4391
4392 static int
4393 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4394 CORE_ADDR addr, int len)
4395 {
4396 CORE_ADDR retval;
4397
4398 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4399 addr, len);
4400
4401 fprintf_unfiltered (gdb_stdlog,
4402 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4403 core_addr_to_string (addr), (unsigned long) len,
4404 core_addr_to_string (retval));
4405 return retval;
4406 }
4407
4408 static int
4409 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4410 CORE_ADDR addr, int len, int rw,
4411 struct expression *cond)
4412 {
4413 int retval;
4414
4415 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4416 addr, len,
4417 rw, cond);
4418
4419 fprintf_unfiltered (gdb_stdlog,
4420 "target_can_accel_watchpoint_condition "
4421 "(%s, %d, %d, %s) = %ld\n",
4422 core_addr_to_string (addr), len, rw,
4423 host_address_to_string (cond), (unsigned long) retval);
4424 return retval;
4425 }
4426
4427 static int
4428 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4429 {
4430 int retval;
4431
4432 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4433
4434 fprintf_unfiltered (gdb_stdlog,
4435 "target_stopped_by_watchpoint () = %ld\n",
4436 (unsigned long) retval);
4437 return retval;
4438 }
4439
4440 static int
4441 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4442 {
4443 int retval;
4444
4445 retval = debug_target.to_stopped_data_address (target, addr);
4446
4447 fprintf_unfiltered (gdb_stdlog,
4448 "target_stopped_data_address ([%s]) = %ld\n",
4449 core_addr_to_string (*addr),
4450 (unsigned long) retval);
4451 return retval;
4452 }
4453
4454 static int
4455 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4456 CORE_ADDR addr,
4457 CORE_ADDR start, int length)
4458 {
4459 int retval;
4460
4461 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4462 start, length);
4463
4464 fprintf_unfiltered (gdb_stdlog,
4465 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4466 core_addr_to_string (addr), core_addr_to_string (start),
4467 length, retval);
4468 return retval;
4469 }
4470
4471 static int
4472 debug_to_insert_hw_breakpoint (struct target_ops *self,
4473 struct gdbarch *gdbarch,
4474 struct bp_target_info *bp_tgt)
4475 {
4476 int retval;
4477
4478 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4479 gdbarch, bp_tgt);
4480
4481 fprintf_unfiltered (gdb_stdlog,
4482 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4483 core_addr_to_string (bp_tgt->placed_address),
4484 (unsigned long) retval);
4485 return retval;
4486 }
4487
4488 static int
4489 debug_to_remove_hw_breakpoint (struct target_ops *self,
4490 struct gdbarch *gdbarch,
4491 struct bp_target_info *bp_tgt)
4492 {
4493 int retval;
4494
4495 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4496 gdbarch, bp_tgt);
4497
4498 fprintf_unfiltered (gdb_stdlog,
4499 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4500 core_addr_to_string (bp_tgt->placed_address),
4501 (unsigned long) retval);
4502 return retval;
4503 }
4504
4505 static int
4506 debug_to_insert_watchpoint (struct target_ops *self,
4507 CORE_ADDR addr, int len, int type,
4508 struct expression *cond)
4509 {
4510 int retval;
4511
4512 retval = debug_target.to_insert_watchpoint (&debug_target,
4513 addr, len, type, cond);
4514
4515 fprintf_unfiltered (gdb_stdlog,
4516 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4517 core_addr_to_string (addr), len, type,
4518 host_address_to_string (cond), (unsigned long) retval);
4519 return retval;
4520 }
4521
4522 static int
4523 debug_to_remove_watchpoint (struct target_ops *self,
4524 CORE_ADDR addr, int len, int type,
4525 struct expression *cond)
4526 {
4527 int retval;
4528
4529 retval = debug_target.to_remove_watchpoint (&debug_target,
4530 addr, len, type, cond);
4531
4532 fprintf_unfiltered (gdb_stdlog,
4533 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4534 core_addr_to_string (addr), len, type,
4535 host_address_to_string (cond), (unsigned long) retval);
4536 return retval;
4537 }
4538
4539 static void
4540 debug_to_terminal_init (struct target_ops *self)
4541 {
4542 debug_target.to_terminal_init (&debug_target);
4543
4544 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4545 }
4546
4547 static void
4548 debug_to_terminal_inferior (struct target_ops *self)
4549 {
4550 debug_target.to_terminal_inferior (&debug_target);
4551
4552 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4553 }
4554
4555 static void
4556 debug_to_terminal_ours_for_output (struct target_ops *self)
4557 {
4558 debug_target.to_terminal_ours_for_output (&debug_target);
4559
4560 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4561 }
4562
4563 static void
4564 debug_to_terminal_ours (struct target_ops *self)
4565 {
4566 debug_target.to_terminal_ours (&debug_target);
4567
4568 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4569 }
4570
4571 static void
4572 debug_to_terminal_save_ours (struct target_ops *self)
4573 {
4574 debug_target.to_terminal_save_ours (&debug_target);
4575
4576 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4577 }
4578
4579 static void
4580 debug_to_terminal_info (struct target_ops *self,
4581 const char *arg, int from_tty)
4582 {
4583 debug_target.to_terminal_info (&debug_target, arg, from_tty);
4584
4585 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4586 from_tty);
4587 }
4588
4589 static void
4590 debug_to_load (struct target_ops *self, char *args, int from_tty)
4591 {
4592 debug_target.to_load (&debug_target, args, from_tty);
4593
4594 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4595 }
4596
4597 static void
4598 debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4599 {
4600 debug_target.to_post_startup_inferior (&debug_target, ptid);
4601
4602 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4603 ptid_get_pid (ptid));
4604 }
4605
4606 static int
4607 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4608 {
4609 int retval;
4610
4611 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4612
4613 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4614 pid, retval);
4615
4616 return retval;
4617 }
4618
4619 static int
4620 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4621 {
4622 int retval;
4623
4624 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4625
4626 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4627 pid, retval);
4628
4629 return retval;
4630 }
4631
4632 static int
4633 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4634 {
4635 int retval;
4636
4637 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4638
4639 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4640 pid, retval);
4641
4642 return retval;
4643 }
4644
4645 static int
4646 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4647 {
4648 int retval;
4649
4650 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4651
4652 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4653 pid, retval);
4654
4655 return retval;
4656 }
4657
4658 static int
4659 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4660 {
4661 int retval;
4662
4663 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4664
4665 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4666 pid, retval);
4667
4668 return retval;
4669 }
4670
4671 static int
4672 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4673 {
4674 int retval;
4675
4676 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4677
4678 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4679 pid, retval);
4680
4681 return retval;
4682 }
4683
4684 static int
4685 debug_to_has_exited (struct target_ops *self,
4686 int pid, int wait_status, int *exit_status)
4687 {
4688 int has_exited;
4689
4690 has_exited = debug_target.to_has_exited (&debug_target,
4691 pid, wait_status, exit_status);
4692
4693 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4694 pid, wait_status, *exit_status, has_exited);
4695
4696 return has_exited;
4697 }
4698
4699 static int
4700 debug_to_can_run (struct target_ops *self)
4701 {
4702 int retval;
4703
4704 retval = debug_target.to_can_run (&debug_target);
4705
4706 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4707
4708 return retval;
4709 }
4710
4711 static struct gdbarch *
4712 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4713 {
4714 struct gdbarch *retval;
4715
4716 retval = debug_target.to_thread_architecture (ops, ptid);
4717
4718 fprintf_unfiltered (gdb_stdlog,
4719 "target_thread_architecture (%s) = %s [%s]\n",
4720 target_pid_to_str (ptid),
4721 host_address_to_string (retval),
4722 gdbarch_bfd_arch_info (retval)->printable_name);
4723 return retval;
4724 }
4725
4726 static void
4727 debug_to_stop (struct target_ops *self, ptid_t ptid)
4728 {
4729 debug_target.to_stop (&debug_target, ptid);
4730
4731 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4732 target_pid_to_str (ptid));
4733 }
4734
4735 static void
4736 debug_to_rcmd (struct target_ops *self, char *command,
4737 struct ui_file *outbuf)
4738 {
4739 debug_target.to_rcmd (&debug_target, command, outbuf);
4740 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4741 }
4742
4743 static char *
4744 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4745 {
4746 char *exec_file;
4747
4748 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4749
4750 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4751 pid, exec_file);
4752
4753 return exec_file;
4754 }
4755
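/* Save a copy of the current target vtable in debug_target and point
   the logged operations of current_target at the debug_to_* wrappers,
   so that those target calls are traced on gdb_stdlog when target
   debugging is in effect.  */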
4756 static void
4757 setup_target_debug (void)
4758 {
4759 memcpy (&debug_target, &current_target, sizeof debug_target);
4760
4761 current_target.to_open = debug_to_open;
4762 current_target.to_post_attach = debug_to_post_attach;
4763 current_target.to_prepare_to_store = debug_to_prepare_to_store;
4764 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
4765 current_target.to_files_info = debug_to_files_info;
4766 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
4767 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
4768 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
4769 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
4770 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
4771 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
4772 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
4773 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
4774 current_target.to_stopped_data_address = debug_to_stopped_data_address;
4775 current_target.to_watchpoint_addr_within_range
4776 = debug_to_watchpoint_addr_within_range;
4777 current_target.to_region_ok_for_hw_watchpoint
4778 = debug_to_region_ok_for_hw_watchpoint;
4779 current_target.to_can_accel_watchpoint_condition
4780 = debug_to_can_accel_watchpoint_condition;
4781 current_target.to_terminal_init = debug_to_terminal_init;
4782 current_target.to_terminal_inferior = debug_to_terminal_inferior;
4783 current_target.to_terminal_ours_for_output
4784 = debug_to_terminal_ours_for_output;
4785 current_target.to_terminal_ours = debug_to_terminal_ours;
4786 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
4787 current_target.to_terminal_info = debug_to_terminal_info;
4788 current_target.to_load = debug_to_load;
4789 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
4790 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
4791 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
4792 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
4793 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
4794 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
4795 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
4796 current_target.to_has_exited = debug_to_has_exited;
4797 current_target.to_can_run = debug_to_can_run;
4798 current_target.to_stop = debug_to_stop;
4799 current_target.to_rcmd = debug_to_rcmd;
4800 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
4801 current_target.to_thread_architecture = debug_to_thread_architecture;
4802 }
4803 \f
4804
4805 static char targ_desc[] =
4806 "Names of targets and files being debugged.\nShows the entire \
4807 stack of targets currently in use (including the exec-file,\n\
4808 core-file, and process, if any), as well as the symbol file name.";
4809
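/* Default implementation of to_rcmd: targets that do not provide
   their own rcmd method cannot support the "monitor" command, so
   report an error.  */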
4810 static void
4811 default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
4812 {
4813 error (_("\"monitor\" command not supported by this target."));
4814 }
4815
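/* Implement the "monitor" command by forwarding CMD to the current
   target's rcmd method, with output directed to gdb_stdtarg.  */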
4816 static void
4817 do_monitor_command (char *cmd,
4818 int from_tty)
4819 {
4820 target_rcmd (cmd, gdb_stdtarg);
4821 }
4822
4823 /* Print the name of each layer of our target stack. */
4824
4825 static void
4826 maintenance_print_target_stack (char *cmd, int from_tty)
4827 {
4828 struct target_ops *t;
4829
4830 printf_filtered (_("The current target stack is:\n"));
4831
4832 for (t = target_stack; t != NULL; t = t->beneath)
4833 {
4834 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4835 }
4836 }
4837
4838 /* Controls if async mode is permitted. */
4839 int target_async_permitted = 0;
4840
4841 /* The set command writes to this variable. If the inferior is
4842 executing, target_async_permitted is *not* updated. */
4843 static int target_async_permitted_1 = 0;
4844
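/* Set hook for "set target-async".  The setting cannot be changed
   while there are live inferiors; in that case the staged value is
   reset and an error is reported.  Otherwise the staged value is
   copied into target_async_permitted.  */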
4845 static void
4846 set_target_async_command (char *args, int from_tty,
4847 struct cmd_list_element *c)
4848 {
4849 if (have_live_inferiors ())
4850 {
4851 target_async_permitted_1 = target_async_permitted;
4852 error (_("Cannot change this setting while the inferior is running."));
4853 }
4854
4855 target_async_permitted = target_async_permitted_1;
4856 }
4857
4858 static void
4859 show_target_async_command (struct ui_file *file, int from_tty,
4860 struct cmd_list_element *c,
4861 const char *value)
4862 {
4863 fprintf_filtered (file,
4864 _("Controlling the inferior in "
4865 "asynchronous mode is %s.\n"), value);
4866 }
4867
4868 /* Temporary copies of permission settings. */
4869
4870 static int may_write_registers_1 = 1;
4871 static int may_write_memory_1 = 1;
4872 static int may_insert_breakpoints_1 = 1;
4873 static int may_insert_tracepoints_1 = 1;
4874 static int may_insert_fast_tracepoints_1 = 1;
4875 static int may_stop_1 = 1;
4876
4877 /* Make the user-set values match the real values again. */
4878
4879 void
4880 update_target_permissions (void)
4881 {
4882 may_write_registers_1 = may_write_registers;
4883 may_write_memory_1 = may_write_memory;
4884 may_insert_breakpoints_1 = may_insert_breakpoints;
4885 may_insert_tracepoints_1 = may_insert_tracepoints;
4886 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4887 may_stop_1 = may_stop;
4888 }
4889
4890 /* This single function handles most of the permission flags in the same
4891    way.  */
4892
4893 static void
4894 set_target_permissions (char *args, int from_tty,
4895 struct cmd_list_element *c)
4896 {
4897 if (target_has_execution)
4898 {
4899 update_target_permissions ();
4900 error (_("Cannot change this setting while the inferior is running."));
4901 }
4902
4903 /* Make the real values match the user-changed values. */
4904 may_write_registers = may_write_registers_1;
4905 may_insert_breakpoints = may_insert_breakpoints_1;
4906 may_insert_tracepoints = may_insert_tracepoints_1;
4907 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4908 may_stop = may_stop_1;
4909 update_observer_mode ();
4910 }
4911
4912 /* Set memory write permission independently of observer mode. */
4913
4914 static void
4915 set_write_memory_permission (char *args, int from_tty,
4916 struct cmd_list_element *c)
4917 {
4918 /* Make the real values match the user-changed values. */
4919 may_write_memory = may_write_memory_1;
4920 update_observer_mode ();
4921 }
4922
4923
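/* Install the dummy target at the bottom of the target stack and
   register the "info target"/"info files" commands, the "monitor"
   command, "maint print target-stack", and the various set/show
   commands controlling target debugging, async mode, and the may-*
   permission flags.  */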
4924 void
4925 initialize_targets (void)
4926 {
4927 init_dummy_target ();
4928 push_target (&dummy_target);
4929
4930 add_info ("target", target_info, targ_desc);
4931 add_info ("files", target_info, targ_desc);
4932
4933 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4934 Set target debugging."), _("\
4935 Show target debugging."), _("\
4936 When non-zero, target debugging is enabled. Higher numbers are more\n\
4937 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
4938 command."),
4939 NULL,
4940 show_targetdebug,
4941 &setdebuglist, &showdebuglist);
4942
4943 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4944 &trust_readonly, _("\
4945 Set mode for reading from readonly sections."), _("\
4946 Show mode for reading from readonly sections."), _("\
4947 When this mode is on, memory reads from readonly sections (such as .text)\n\
4948 will be read from the object file instead of from the target. This will\n\
4949 result in significant performance improvement for remote targets."),
4950 NULL,
4951 show_trust_readonly,
4952 &setlist, &showlist);
4953
4954 add_com ("monitor", class_obscure, do_monitor_command,
4955 _("Send a command to the remote monitor (remote targets only)."));
4956
4957 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4958 _("Print the name of each layer of the internal target stack."),
4959 &maintenanceprintlist);
4960
4961 add_setshow_boolean_cmd ("target-async", no_class,
4962 &target_async_permitted_1, _("\
4963 Set whether gdb controls the inferior in asynchronous mode."), _("\
4964 Show whether gdb controls the inferior in asynchronous mode."), _("\
4965 Tells gdb whether to control the inferior in asynchronous mode."),
4966 set_target_async_command,
4967 show_target_async_command,
4968 &setlist,
4969 &showlist);
4970
4971 add_setshow_boolean_cmd ("may-write-registers", class_support,
4972 &may_write_registers_1, _("\
4973 Set permission to write into registers."), _("\
4974 Show permission to write into registers."), _("\
4975 When this permission is on, GDB may write into the target's registers.\n\
4976 Otherwise, any sort of write attempt will result in an error."),
4977 set_target_permissions, NULL,
4978 &setlist, &showlist);
4979
4980 add_setshow_boolean_cmd ("may-write-memory", class_support,
4981 &may_write_memory_1, _("\
4982 Set permission to write into target memory."), _("\
4983 Show permission to write into target memory."), _("\
4984 When this permission is on, GDB may write into the target's memory.\n\
4985 Otherwise, any sort of write attempt will result in an error."),
4986 set_write_memory_permission, NULL,
4987 &setlist, &showlist);
4988
4989 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4990 &may_insert_breakpoints_1, _("\
4991 Set permission to insert breakpoints in the target."), _("\
4992 Show permission to insert breakpoints in the target."), _("\
4993 When this permission is on, GDB may insert breakpoints in the program.\n\
4994 Otherwise, any sort of insertion attempt will result in an error."),
4995 set_target_permissions, NULL,
4996 &setlist, &showlist);
4997
4998 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4999 &may_insert_tracepoints_1, _("\
5000 Set permission to insert tracepoints in the target."), _("\
5001 Show permission to insert tracepoints in the target."), _("\
5002 When this permission is on, GDB may insert tracepoints in the program.\n\
5003 Otherwise, any sort of insertion attempt will result in an error."),
5004 set_target_permissions, NULL,
5005 &setlist, &showlist);
5006
5007 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
5008 &may_insert_fast_tracepoints_1, _("\
5009 Set permission to insert fast tracepoints in the target."), _("\
5010 Show permission to insert fast tracepoints in the target."), _("\
5011 When this permission is on, GDB may insert fast tracepoints.\n\
5012 Otherwise, any sort of insertion attempt will result in an error."),
5013 set_target_permissions, NULL,
5014 &setlist, &showlist);
5015
5016 add_setshow_boolean_cmd ("may-interrupt", class_support,
5017 &may_stop_1, _("\
5018 Set permission to interrupt or signal the target."), _("\
5019 Show permission to interrupt or signal the target."), _("\
5020 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5021 Otherwise, any attempt to interrupt or stop will be ignored."),
5022 set_target_permissions, NULL,
5023 &setlist, &showlist);
5024 }