convert to_follow_fork
[binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
59
60 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
61 long lwp, long tid);
62
63 static int default_follow_fork (struct target_ops *self, int follow_child,
64 int detach_fork);
65
66 static void tcomplain (void) ATTRIBUTE_NORETURN;
67
68 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
69
70 static int return_zero (void);
71
72 void target_ignore (void);
73
74 static void target_command (char *, int);
75
76 static struct target_ops *find_default_run_target (char *);
77
78 static target_xfer_partial_ftype default_xfer_partial;
79
80 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
81 ptid_t ptid);
82
83 static int dummy_find_memory_regions (struct target_ops *self,
84 find_memory_region_ftype ignore1,
85 void *ignore2);
86
87 static char *dummy_make_corefile_notes (struct target_ops *self,
88 bfd *ignore1, int *ignore2);
89
90 static int find_default_can_async_p (struct target_ops *ignore);
91
92 static int find_default_is_async_p (struct target_ops *ignore);
93
94 static enum exec_direction_kind default_execution_direction
95 (struct target_ops *self);
96
97 #include "target-delegates.c"
98
99 static void init_dummy_target (void);
100
101 static struct target_ops debug_target;
102
103 static void debug_to_open (char *, int);
104
105 static void debug_to_prepare_to_store (struct target_ops *self,
106 struct regcache *);
107
108 static void debug_to_files_info (struct target_ops *);
109
110 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
111 struct bp_target_info *);
112
113 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
114 struct bp_target_info *);
115
116 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
117 int, int, int);
118
119 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
120 struct gdbarch *,
121 struct bp_target_info *);
122
123 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
124 struct gdbarch *,
125 struct bp_target_info *);
126
127 static int debug_to_insert_watchpoint (struct target_ops *self,
128 CORE_ADDR, int, int,
129 struct expression *);
130
131 static int debug_to_remove_watchpoint (struct target_ops *self,
132 CORE_ADDR, int, int,
133 struct expression *);
134
135 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
136
137 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
138 CORE_ADDR, CORE_ADDR, int);
139
140 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
141 CORE_ADDR, int);
142
143 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
144 CORE_ADDR, int, int,
145 struct expression *);
146
147 static void debug_to_terminal_init (struct target_ops *self);
148
149 static void debug_to_terminal_inferior (struct target_ops *self);
150
151 static void debug_to_terminal_ours_for_output (struct target_ops *self);
152
153 static void debug_to_terminal_save_ours (struct target_ops *self);
154
155 static void debug_to_terminal_ours (struct target_ops *self);
156
157 static void debug_to_load (struct target_ops *self, char *, int);
158
159 static int debug_to_can_run (struct target_ops *self);
160
161 static void debug_to_stop (struct target_ops *self, ptid_t);
162
/* Registry of all known target vectors: a dynamically grown array of
   pointers, the number of entries in use, and the allocated capacity.  */
struct target_ops **target_structs;
unsigned target_struct_size;
unsigned target_struct_allocsize;
#define DEFAULT_ALLOCSIZE 10

/* The initial current target, so that there is always a semi-valid
   current target.  */

static struct target_ops dummy_target;

/* Top of target stack.  */

static struct target_ops *target_stack;

/* The target structure we are currently using to talk to a process
   or file or whatever "inferior" we have.  */

struct target_ops current_target;

/* Command list for target.  */

static struct cmd_list_element *targetlist = NULL;

/* Nonzero if we should trust readonly sections from the
   executable when reading memory.  */

static int trust_readonly = 0;

/* Nonzero if we should show true memory content including
   memory breakpoint inserted by gdb.  */

static int show_memory_breakpoints = 0;

/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertent disruption, such as in non-stop mode.  */

int may_write_registers = 1;

int may_write_memory = 1;

int may_insert_breakpoints = 1;

int may_insert_tracepoints = 1;

int may_insert_fast_tracepoints = 1;

int may_stop = 1;

/* Non-zero if we want to see trace of target level stuff.  */

static unsigned int targetdebug = 0;
/* Implementation of "show debug target": report the current value of
   the targetdebug setting.  */

static void
show_targetdebug (struct ui_file *stream, int from_tty,
		  struct cmd_list_element *cmd, const char *setting)
{
  fprintf_filtered (stream, _("Target debugging is %s.\n"), setting);
}
224
225 static void setup_target_debug (void);
226
227 /* The user just typed 'target' without the name of a target. */
228
229 static void
230 target_command (char *arg, int from_tty)
231 {
232 fputs_filtered ("Argument required (target name). Try `help target'\n",
233 gdb_stdout);
234 }
235
236 /* Default target_has_* methods for process_stratum targets. */
237
238 int
239 default_child_has_all_memory (struct target_ops *ops)
240 {
241 /* If no inferior selected, then we can't read memory here. */
242 if (ptid_equal (inferior_ptid, null_ptid))
243 return 0;
244
245 return 1;
246 }
247
248 int
249 default_child_has_memory (struct target_ops *ops)
250 {
251 /* If no inferior selected, then we can't read memory here. */
252 if (ptid_equal (inferior_ptid, null_ptid))
253 return 0;
254
255 return 1;
256 }
257
258 int
259 default_child_has_stack (struct target_ops *ops)
260 {
261 /* If no inferior selected, there's no stack. */
262 if (ptid_equal (inferior_ptid, null_ptid))
263 return 0;
264
265 return 1;
266 }
267
268 int
269 default_child_has_registers (struct target_ops *ops)
270 {
271 /* Can't read registers from no inferior. */
272 if (ptid_equal (inferior_ptid, null_ptid))
273 return 0;
274
275 return 1;
276 }
277
278 int
279 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
280 {
281 /* If there's no thread selected, then we can't make it run through
282 hoops. */
283 if (ptid_equal (the_ptid, null_ptid))
284 return 0;
285
286 return 1;
287 }
288
289
290 int
291 target_has_all_memory_1 (void)
292 {
293 struct target_ops *t;
294
295 for (t = current_target.beneath; t != NULL; t = t->beneath)
296 if (t->to_has_all_memory (t))
297 return 1;
298
299 return 0;
300 }
301
302 int
303 target_has_memory_1 (void)
304 {
305 struct target_ops *t;
306
307 for (t = current_target.beneath; t != NULL; t = t->beneath)
308 if (t->to_has_memory (t))
309 return 1;
310
311 return 0;
312 }
313
314 int
315 target_has_stack_1 (void)
316 {
317 struct target_ops *t;
318
319 for (t = current_target.beneath; t != NULL; t = t->beneath)
320 if (t->to_has_stack (t))
321 return 1;
322
323 return 0;
324 }
325
326 int
327 target_has_registers_1 (void)
328 {
329 struct target_ops *t;
330
331 for (t = current_target.beneath; t != NULL; t = t->beneath)
332 if (t->to_has_registers (t))
333 return 1;
334
335 return 0;
336 }
337
338 int
339 target_has_execution_1 (ptid_t the_ptid)
340 {
341 struct target_ops *t;
342
343 for (t = current_target.beneath; t != NULL; t = t->beneath)
344 if (t->to_has_execution (t, the_ptid))
345 return 1;
346
347 return 0;
348 }
349
/* Like target_has_execution_1, but for the currently selected
   inferior.  */

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
355
356 /* Complete initialization of T. This ensures that various fields in
357 T are set, if needed by the target implementation. */
358
void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* NOTE(review): return_zero is declared taking no arguments; calling
     it through these prototyped casts relies on the platform calling
     convention tolerating extra arguments -- traditional here, but
     formally undefined behavior in ISO C.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill any remaining NULL methods with the generated delegators /
     defaults (see target-delegates.c).  */
  install_delegators (t);
}
383
384 /* Add possible target architecture T to the list and add a new
385 command 'target T->to_shortname'. Set COMPLETER as the command's
386 completer if not NULL. */
387
void
add_target_with_completer (struct target_ops *t,
			   completer_ftype *completer)
{
  struct cmd_list_element *c;

  /* Make sure T has all mandatory methods before registering it.  */
  complete_target_initialization (t);

  /* Lazily create the registry on first use.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  /* Grow by doubling when the registry is full.  */
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* The first registered target also creates the "target" prefix
     command itself.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol. For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
	       &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
424
425 /* Add a possible target architecture to the list. */
426
void
add_target (struct target_ops *t)
{
  /* Same as add_target_with_completer, with no command completer.  */
  add_target_with_completer (t, NULL);
}
432
433 /* See target.h. */
434
void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  alt = xstrprintf ("target %s", t->to_shortname);
  /* ALT is not freed here; presumably deprecate_cmd retains the
     pointer for the deprecation message -- TODO confirm against the
     deprecate_cmd implementation.  */
  deprecate_cmd (c, alt);
}
447
448 /* Stub functions */
449
/* Stub target method that deliberately does nothing; used where
   ignoring the call is the correct default action.  */

void
target_ignore (void)
{
}
454
/* Kill the inferior via the current target vector, tracing the call
   when target debugging is enabled.  */

void
target_kill (void)
{
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");

  current_target.to_kill (&current_target);
}
463
464 void
465 target_load (char *arg, int from_tty)
466 {
467 target_dcache_invalidate ();
468 (*current_target.to_load) (&current_target, arg, from_tty);
469 }
470
471 void
472 target_create_inferior (char *exec_file, char *args,
473 char **env, int from_tty)
474 {
475 struct target_ops *t;
476
477 for (t = current_target.beneath; t != NULL; t = t->beneath)
478 {
479 if (t->to_create_inferior != NULL)
480 {
481 t->to_create_inferior (t, exec_file, args, env, from_tty);
482 if (targetdebug)
483 fprintf_unfiltered (gdb_stdlog,
484 "target_create_inferior (%s, %s, xxx, %d)\n",
485 exec_file, args, from_tty);
486 return;
487 }
488 }
489
490 internal_error (__FILE__, __LINE__,
491 _("could not find a target to create inferior"));
492 }
493
494 void
495 target_terminal_inferior (void)
496 {
497 /* A background resume (``run&'') should leave GDB in control of the
498 terminal. Use target_can_async_p, not target_is_async_p, since at
499 this point the target is not async yet. However, if sync_execution
500 is not set, we know it will become async prior to resume. */
501 if (target_can_async_p () && !sync_execution)
502 return;
503
504 /* If GDB is resuming the inferior in the foreground, install
505 inferior's terminal modes. */
506 (*current_target.to_terminal_inferior) (&current_target);
507 }
508
/* Fallback memory-transfer method used when no stratum provides
   memory access: fail every request with EIO, transferring nothing.  */

static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
516
/* Default method for operations the current target does not support:
   throw an error naming the target.  Does not return.  */

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
523
/* Throw an error for operations that require a live process.  Does
   not return.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
529
/* Default implementation of to_terminal_info: there is no saved
   terminal state to report.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
535
536 /* A default implementation for the to_get_ada_task_ptid target method.
537
538 This function builds the PTID by using both LWP and TID as part of
539 the PTID lwp and tid elements. The pid used is the pid of the
540 inferior_ptid. */
541
static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  /* Only LWP and TID are caller-supplied; the pid element always
     comes from the current inferior.  */
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
547
548 static enum exec_direction_kind
549 default_execution_direction (struct target_ops *self)
550 {
551 if (!target_can_execute_reverse)
552 return EXEC_FORWARD;
553 else if (!target_can_async_p ())
554 return EXEC_FORWARD;
555 else
556 gdb_assert_not_reached ("\
557 to_execution_direction must be implemented for reverse async");
558 }
559
560 /* Go through the target stack from top to bottom, copying over zero
561 entries in current_target, then filling in still empty entries. In
562 effect, we are doing class inheritance through the pushed target
563 vectors.
564
565 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
566 is currently implemented, is that it discards any knowledge of
567 which target an inherited method originally belonged to.
568 Consequently, new new target methods should instead explicitly and
569 locally search the target stack for the target that can handle the
570 request. */
571
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy FIELD from TARGET into current_target, but only if no
     higher stratum has already supplied it.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  /* Walk the stack top to bottom so higher strata win.  The long
     comment trail records which methods have been converted to the
     delegation mechanism and must NOT be inherited here.  */
  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      /* Do not inherit to_files_info.  */
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      /* Do not inherit to_can_use_hw_breakpoint.  */
      /* Do not inherit to_insert_hw_breakpoint.  */
      /* Do not inherit to_remove_hw_breakpoint.  */
      /* Do not inherit to_ranged_break_num_registers.  */
      /* Do not inherit to_insert_watchpoint.  */
      /* Do not inherit to_remove_watchpoint.  */
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      /* Do not inherit to_watchpoint_addr_within_range.  */
      /* Do not inherit to_region_ok_for_hw_watchpoint.  */
      /* Do not inherit to_can_accel_watchpoint_condition.  */
      /* Do not inherit to_masked_watch_num_registers.  */
      /* Do not inherit to_terminal_init.  */
      /* Do not inherit to_terminal_inferior.  */
      /* Do not inherit to_terminal_ours_for_output.  */
      /* Do not inherit to_terminal_ours.  */
      /* Do not inherit to_terminal_save_ours.  */
      /* Do not inherit to_terminal_info.  */
      /* Do not inherit to_kill.  */
      /* Do not inherit to_load.  */
      /* Do not inherit to_create_inferior.  */
      /* Do not inherit to_post_startup_inferior.  */
      /* Do not inherit to_insert_fork_catchpoint.  */
      /* Do not inherit to_remove_fork_catchpoint.  */
      /* Do not inherit to_insert_vfork_catchpoint.  */
      /* Do not inherit to_remove_vfork_catchpoint.  */
      /* Do not inherit to_follow_fork.  */
      /* Do not inherit to_insert_exec_catchpoint.  */
      /* Do not inherit to_remove_exec_catchpoint.  */
      /* Do not inherit to_set_syscall_catchpoint.  */
      /* Do not inherit to_has_exited.  */
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      /* Do not inherit to_extra_thread_info.  */
      /* Do not inherit to_thread_name.  */
      /* Do not inherit to_stop.  */
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      /* Do not inherit to_pid_to_exec_file.  */
      /* Do not inherit to_log_command.  */
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      /* Do not inherit to_find_memory_regions.  */
      /* Do not inherit to_make_corefile_notes.  */
      /* Do not inherit to_get_bookmark.  */
      /* Do not inherit to_goto_bookmark.  */
      /* Do not inherit to_get_thread_local_address.  */
      /* Do not inherit to_can_execute_reverse.  */
      /* Do not inherit to_execution_direction.  */
      /* Do not inherit to_thread_architecture.  */
      /* Do not inherit to_read_description.  */
      /* Do not inherit to_get_ada_task_ptid.  */
      /* Do not inherit to_search_memory.  */
      /* Do not inherit to_supports_multi_process.  */
      /* Do not inherit to_supports_enable_disable_tracepoint.  */
      /* Do not inherit to_supports_string_tracing.  */
      /* Do not inherit to_trace_init.  */
      /* Do not inherit to_download_tracepoint.  */
      /* Do not inherit to_can_download_tracepoint.  */
      /* Do not inherit to_download_trace_state_variable.  */
      /* Do not inherit to_enable_tracepoint.  */
      /* Do not inherit to_disable_tracepoint.  */
      /* Do not inherit to_trace_set_readonly_regions.  */
      /* Do not inherit to_trace_start.  */
      /* Do not inherit to_get_trace_status.  */
      /* Do not inherit to_get_tracepoint_status.  */
      /* Do not inherit to_trace_stop.  */
      /* Do not inherit to_trace_find.  */
      /* Do not inherit to_get_trace_state_variable_value.  */
      /* Do not inherit to_save_trace_data.  */
      /* Do not inherit to_upload_tracepoints.  */
      /* Do not inherit to_upload_trace_state_variables.  */
      /* Do not inherit to_get_raw_trace_data.  */
      /* Do not inherit to_get_min_fast_tracepoint_insn_len.  */
      /* Do not inherit to_set_disconnected_tracing.  */
      /* Do not inherit to_set_circular_trace_buffer.  */
      /* Do not inherit to_set_trace_buffer_size.  */
      /* Do not inherit to_set_trace_notes.  */
      /* Do not inherit to_get_tib_address.  */
      /* Do not inherit to_set_permissions.  */
      /* Do not inherit to_static_tracepoint_marker_at.  */
      /* Do not inherit to_static_tracepoint_markers_by_strid.  */
      /* Do not inherit to_traceframe_info.  */
      /* Do not inherit to_use_agent.  */
      /* Do not inherit to_can_use_agent.  */
      /* Do not inherit to_augmented_libraries_svr4_read.  */
      INHERIT (to_magic, t);
      /* Do not inherit
	 to_supports_evaluation_of_breakpoint_conditions.  */
      /* Do not inherit to_can_run_breakpoint_commands.  */
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
    if (!current_target.field) \
      current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  /* to_read_description is looked up dynamically, so force it back
     to NULL regardless of what was inherited.  */
  current_target.to_read_description = NULL;

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
754
755 /* Push a new target type into the stack of the existing target accessors,
756 possibly superseding some of the existing accessors.
757
758 Rather than allow an empty stack, we always have the dummy target at
759 the bottom stratum, so we can call the function vectors without
760 checking them. */
761
void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in: the first
     link whose stratum is at or below T's.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
	 and un-hook it from the stack.  Unhooking happens first so
	 target_close does not see the target on the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Rebuild the flattened current_target to reflect the new stack.  */
  update_current_target ();
}
805
806 /* Remove a target_ops vector from the stack, wherever it may be.
807 Return how many times it was removed (0 or 1). */
808
int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target always stays at the bottom of the stack.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  /* Rebuild current_target while T is still valid but off the stack.  */
  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
847
/* Unpush targets from the top of the stack down until the top stratum
   is at or below ABOVE_STRATUM.  */

void
pop_all_targets_above (enum strata above_stratum)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      if (!unpush_target (target_stack))
	{
	  /* unpush_target only fails when the target is not on the
	     stack, which here means the stack is corrupted.  */
	  fprintf_unfiltered (gdb_stderr,
			      "pop_all_targets couldn't find target %s\n",
			      target_stack->to_shortname);
	  internal_error (__FILE__, __LINE__,
			  _("failed internal consistency check"));
	  break;
	}
    }
}
864
/* Unpush every target except the dummy target at the bottom.  */

void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
870
871 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
872
873 int
874 target_is_pushed (struct target_ops *t)
875 {
876 struct target_ops **cur;
877
878 /* Check magic number. If wrong, it probably means someone changed
879 the struct definition, but not all the places that initialize one. */
880 if (t->to_magic != OPS_MAGIC)
881 {
882 fprintf_unfiltered (gdb_stderr,
883 "Magic number of %s target struct wrong\n",
884 t->to_shortname);
885 internal_error (__FILE__, __LINE__,
886 _("failed internal consistency check"));
887 }
888
889 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
890 if (*cur == t)
891 return 1;
892
893 return 0;
894 }
895
896 /* Using the objfile specified in OBJFILE, find the address for the
897 current thread's thread-local storage with offset OFFSET. */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile: assigned inside TRY_CATCH and read after it.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first stratum that can resolve thread-local addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Anything unrecognized is re-thrown to a higher catcher.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
992
993 const char *
994 target_xfer_status_to_string (enum target_xfer_status err)
995 {
996 #define CASE(X) case X: return #X
997 switch (err)
998 {
999 CASE(TARGET_XFER_E_IO);
1000 CASE(TARGET_XFER_E_UNAVAILABLE);
1001 default:
1002 return "<unknown>";
1003 }
1004 #undef CASE
1005 };
1006
1007
1008 #undef MIN
1009 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1010
1011 /* target_read_string -- read a null terminated string, up to LEN bytes,
1012 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1013 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1014 is responsible for freeing it. Return the number of bytes successfully
1015 read. */
1016
int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read an aligned 4-byte chunk; TLEN is how many of its bytes
	 from OFFSET onward belong to the string.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the result buffer by doubling whenever the next chunk
	 would not fit.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes, stopping after (and including) a terminating NUL.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* Ownership of BUFFER transfers to the caller, even on error.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1083
1084 struct target_section_table *
1085 target_get_section_table (struct target_ops *target)
1086 {
1087 struct target_ops *t;
1088
1089 if (targetdebug)
1090 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1091
1092 for (t = target; t != NULL; t = t->beneath)
1093 if (t->to_get_section_table != NULL)
1094 return (*t->to_get_section_table) (t);
1095
1096 return NULL;
1097 }
1098
1099 /* Find a section containing ADDR. */
1100
1101 struct target_section *
1102 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1103 {
1104 struct target_section_table *table = target_get_section_table (target);
1105 struct target_section *secp;
1106
1107 if (table == NULL)
1108 return NULL;
1109
1110 for (secp = table->sections; secp < table->sections_end; secp++)
1111 {
1112 if (addr >= secp->addr && addr < secp->endaddr)
1113 return secp;
1114 }
1115 return NULL;
1116 }
1117
1118 /* Read memory from the live target, even if currently inspecting a
1119 traceframe. The return is the same as that of target_read. */
1120
1121 static enum target_xfer_status
1122 target_read_live_memory (enum target_object object,
1123 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1124 ULONGEST *xfered_len)
1125 {
1126 enum target_xfer_status ret;
1127 struct cleanup *cleanup;
1128
1129 /* Switch momentarily out of tfind mode so to access live memory.
1130 Note that this must not clear global state, such as the frame
1131 cache, which must still remain valid for the previous traceframe.
1132 We may be _building_ the frame cache at this point. */
1133 cleanup = make_cleanup_restore_traceframe_number ();
1134 set_traceframe_number (-1);
1135
1136 ret = target_xfer_partial (current_target.beneath, object, NULL,
1137 myaddr, NULL, memaddr, len, xfered_len);
1138
1139 do_cleanups (cleanup);
1140 return ret;
1141 }
1142
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed if MEMADDR falls inside a section flagged
     SEC_READONLY; otherwise live memory may legitimately differ
     from the recorded traceframe.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half:
		     truncate LEN to the part inside the section.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  /* MEMADDR is not in any known read-only section.  */
  return TARGET_XFER_EOF;
}
1198
1199 /* Read memory from more than one valid target. A core file, for
1200 instance, could have some of memory but delegate other bits to
1201 the target below it. So, we must manually try all targets. */
1202
1203 static enum target_xfer_status
1204 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1205 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1206 ULONGEST *xfered_len)
1207 {
1208 enum target_xfer_status res;
1209
1210 do
1211 {
1212 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1213 readbuf, writebuf, memaddr, len,
1214 xfered_len);
1215 if (res == TARGET_XFER_OK)
1216 break;
1217
1218 /* Stop if the target reports that the memory is not available. */
1219 if (res == TARGET_XFER_E_UNAVAILABLE)
1220 break;
1221
1222 /* We want to continue past core files to executables, but not
1223 past a running target's memory. */
1224 if (ops->to_has_all_memory (ops))
1225 break;
1226
1227 ops = ops->beneath;
1228 }
1229 while (ops != NULL);
1230
1231 return res;
1232 }
1233
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   The request is tried against several sources in order: unmapped
   overlay sections, trusted read-only executable sections, live
   read-only memory (when a traceframe lacks the data), GDB's dcache,
   and finally the raw target stack.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* The traceframe has no data at MEMADDR itself.  Don't
		 read into the traceframe's available memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Truncate the request to the gap before the first
		     available range.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.
     NOTE(review): MEMADDR + LEN could wrap for requests near the top
     of the address space -- presumably callers keep LEN small enough;
     confirm.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  /* Route cacheable accesses (per-region "cache" attribute, or the
     stack/code caches when enabled for the matching object) through
     the dcache.  */
  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1444
1445 /* Perform a partial memory transfer. For docs see target.h,
1446 to_xfer_partial. */
1447
1448 static enum target_xfer_status
1449 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1450 gdb_byte *readbuf, const gdb_byte *writebuf,
1451 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1452 {
1453 enum target_xfer_status res;
1454
1455 /* Zero length requests are ok and require no work. */
1456 if (len == 0)
1457 return TARGET_XFER_EOF;
1458
1459 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1460 breakpoint insns, thus hiding out from higher layers whether
1461 there are software breakpoints inserted in the code stream. */
1462 if (readbuf != NULL)
1463 {
1464 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1465 xfered_len);
1466
1467 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1468 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1469 }
1470 else
1471 {
1472 void *buf;
1473 struct cleanup *old_chain;
1474
1475 /* A large write request is likely to be partially satisfied
1476 by memory_xfer_partial_1. We will continually malloc
1477 and free a copy of the entire write request for breakpoint
1478 shadow handling even though we only end up writing a small
1479 subset of it. Cap writes to 4KB to mitigate this. */
1480 len = min (4096, len);
1481
1482 buf = xmalloc (len);
1483 old_chain = make_cleanup (xfree, buf);
1484 memcpy (buf, writebuf, len);
1485
1486 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1487 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1488 xfered_len);
1489
1490 do_cleanups (old_chain);
1491 }
1492
1493 return res;
1494 }
1495
1496 static void
1497 restore_show_memory_breakpoints (void *arg)
1498 {
1499 show_memory_breakpoints = (uintptr_t) arg;
1500 }
1501
1502 struct cleanup *
1503 make_show_memory_breakpoints_cleanup (int show)
1504 {
1505 int current = show_memory_breakpoints;
1506
1507 show_memory_breakpoints = show;
1508 return make_cleanup (restore_show_memory_breakpoints,
1509 (void *) (uintptr_t) current);
1510 }
1511
/* For docs see target.h, to_xfer_partial.

   Central dispatch for all partial transfers: routes memory objects
   through the memory-specific paths, everything else straight to the
   target's to_xfer_partial, and emits "set debug target" tracing.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the user's "set may-write-memory" setting before anything
     reaches the target.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  /* Hex-dump the transferred bytes; a new output line starts
	     whenever the host address of the byte is 16-aligned, and
	     with targetdebug < 2 the dump stops after one line.  */
	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1604
1605 /* Read LEN bytes of target memory at address MEMADDR, placing the
1606 results in GDB's memory at MYADDR. Returns either 0 for success or
1607 TARGET_XFER_E_IO if any error occurs.
1608
1609 If an error occurs, no guarantee is made about the contents of the data at
1610 MYADDR. In particular, the caller should not depend upon partial reads
1611 filling the buffer with good data. There is no way for the caller to know
1612 how much good data might have been transfered anyway. Callers that can
1613 deal with partial reads should call target_read (which will retry until
1614 it makes no progress, and then return how much was transferred). */
1615
1616 int
1617 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1618 {
1619 /* Dispatch to the topmost target, not the flattened current_target.
1620 Memory accesses check target->to_has_(all_)memory, and the
1621 flattened target doesn't inherit those. */
1622 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1623 myaddr, memaddr, len) == len)
1624 return 0;
1625 else
1626 return TARGET_XFER_E_IO;
1627 }
1628
1629 /* Like target_read_memory, but specify explicitly that this is a read
1630 from the target's raw memory. That is, this read bypasses the
1631 dcache, breakpoint shadowing, etc. */
1632
1633 int
1634 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1635 {
1636 /* See comment in target_read_memory about why the request starts at
1637 current_target.beneath. */
1638 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1639 myaddr, memaddr, len) == len)
1640 return 0;
1641 else
1642 return TARGET_XFER_E_IO;
1643 }
1644
1645 /* Like target_read_memory, but specify explicitly that this is a read from
1646 the target's stack. This may trigger different cache behavior. */
1647
1648 int
1649 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1650 {
1651 /* See comment in target_read_memory about why the request starts at
1652 current_target.beneath. */
1653 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1654 myaddr, memaddr, len) == len)
1655 return 0;
1656 else
1657 return TARGET_XFER_E_IO;
1658 }
1659
1660 /* Like target_read_memory, but specify explicitly that this is a read from
1661 the target's code. This may trigger different cache behavior. */
1662
1663 int
1664 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1665 {
1666 /* See comment in target_read_memory about why the request starts at
1667 current_target.beneath. */
1668 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1669 myaddr, memaddr, len) == len)
1670 return 0;
1671 else
1672 return TARGET_XFER_E_IO;
1673 }
1674
1675 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1676 Returns either 0 for success or TARGET_XFER_E_IO if any
1677 error occurs. If an error occurs, no guarantee is made about how
1678 much data got written. Callers that can deal with partial writes
1679 should call target_write. */
1680
1681 int
1682 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1683 {
1684 /* See comment in target_read_memory about why the request starts at
1685 current_target.beneath. */
1686 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1687 myaddr, memaddr, len) == len)
1688 return 0;
1689 else
1690 return TARGET_XFER_E_IO;
1691 }
1692
1693 /* Write LEN bytes from MYADDR to target raw memory at address
1694 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1695 if any error occurs. If an error occurs, no guarantee is made
1696 about how much data got written. Callers that can deal with
1697 partial writes should call target_write. */
1698
1699 int
1700 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1701 {
1702 /* See comment in target_read_memory about why the request starts at
1703 current_target.beneath. */
1704 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1705 myaddr, memaddr, len) == len)
1706 return 0;
1707 else
1708 return TARGET_XFER_E_IO;
1709 }
1710
1711 /* Fetch the target's memory map. */
1712
1713 VEC(mem_region_s) *
1714 target_memory_map (void)
1715 {
1716 VEC(mem_region_s) *result;
1717 struct mem_region *last_one, *this_one;
1718 int ix;
1719 struct target_ops *t;
1720
1721 if (targetdebug)
1722 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1723
1724 for (t = current_target.beneath; t != NULL; t = t->beneath)
1725 if (t->to_memory_map != NULL)
1726 break;
1727
1728 if (t == NULL)
1729 return NULL;
1730
1731 result = t->to_memory_map (t);
1732 if (result == NULL)
1733 return NULL;
1734
1735 qsort (VEC_address (mem_region_s, result),
1736 VEC_length (mem_region_s, result),
1737 sizeof (struct mem_region), mem_region_cmp);
1738
1739 /* Check that regions do not overlap. Simultaneously assign
1740 a numbering for the "mem" commands to use to refer to
1741 each region. */
1742 last_one = NULL;
1743 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1744 {
1745 this_one->number = ix;
1746
1747 if (last_one && last_one->hi > this_one->lo)
1748 {
1749 warning (_("Overlapping regions in memory map: ignoring"));
1750 VEC_free (mem_region_s, result);
1751 return NULL;
1752 }
1753 last_one = this_one;
1754 }
1755
1756 return result;
1757 }
1758
1759 void
1760 target_flash_erase (ULONGEST address, LONGEST length)
1761 {
1762 struct target_ops *t;
1763
1764 for (t = current_target.beneath; t != NULL; t = t->beneath)
1765 if (t->to_flash_erase != NULL)
1766 {
1767 if (targetdebug)
1768 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1769 hex_string (address), phex (length, 0));
1770 t->to_flash_erase (t, address, length);
1771 return;
1772 }
1773
1774 tcomplain ();
1775 }
1776
1777 void
1778 target_flash_done (void)
1779 {
1780 struct target_ops *t;
1781
1782 for (t = current_target.beneath; t != NULL; t = t->beneath)
1783 if (t->to_flash_done != NULL)
1784 {
1785 if (targetdebug)
1786 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1787 t->to_flash_done (t);
1788 return;
1789 }
1790
1791 tcomplain ();
1792 }
1793
/* "show trust-readonly-sections" callback: print the current setting
   VALUE to FILE.  FROM_TTY and C are unused here but required by the
   show-command callback signature.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1802
1803 /* More generic transfers. */
1804
1805 static enum target_xfer_status
1806 default_xfer_partial (struct target_ops *ops, enum target_object object,
1807 const char *annex, gdb_byte *readbuf,
1808 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1809 ULONGEST *xfered_len)
1810 {
1811 if (object == TARGET_OBJECT_MEMORY
1812 && ops->deprecated_xfer_memory != NULL)
1813 /* If available, fall back to the target's
1814 "deprecated_xfer_memory" method. */
1815 {
1816 int xfered = -1;
1817
1818 errno = 0;
1819 if (writebuf != NULL)
1820 {
1821 void *buffer = xmalloc (len);
1822 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1823
1824 memcpy (buffer, writebuf, len);
1825 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1826 1/*write*/, NULL, ops);
1827 do_cleanups (cleanup);
1828 }
1829 if (readbuf != NULL)
1830 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1831 0/*read*/, NULL, ops);
1832 if (xfered > 0)
1833 {
1834 *xfered_len = (ULONGEST) xfered;
1835 return TARGET_XFER_E_IO;
1836 }
1837 else if (xfered == 0 && errno == 0)
1838 /* "deprecated_xfer_memory" uses 0, cross checked against
1839 ERRNO as one indication of an error. */
1840 return TARGET_XFER_EOF;
1841 else
1842 return TARGET_XFER_E_IO;
1843 }
1844 else
1845 {
1846 gdb_assert (ops->beneath != NULL);
1847 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1848 readbuf, writebuf, offset, len,
1849 xfered_len);
1850 }
1851 }
1852
1853 /* Target vector read/write partial wrapper functions. */
1854
1855 static enum target_xfer_status
1856 target_read_partial (struct target_ops *ops,
1857 enum target_object object,
1858 const char *annex, gdb_byte *buf,
1859 ULONGEST offset, ULONGEST len,
1860 ULONGEST *xfered_len)
1861 {
1862 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1863 xfered_len);
1864 }
1865
1866 static enum target_xfer_status
1867 target_write_partial (struct target_ops *ops,
1868 enum target_object object,
1869 const char *annex, const gdb_byte *buf,
1870 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1871 {
1872 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1873 xfered_len);
1874 }
1875
1876 /* Wrappers to perform the full transfer. */
1877
1878 /* For docs on target_read see target.h. */
1879
1880 LONGEST
1881 target_read (struct target_ops *ops,
1882 enum target_object object,
1883 const char *annex, gdb_byte *buf,
1884 ULONGEST offset, LONGEST len)
1885 {
1886 LONGEST xfered = 0;
1887
1888 while (xfered < len)
1889 {
1890 ULONGEST xfered_len;
1891 enum target_xfer_status status;
1892
1893 status = target_read_partial (ops, object, annex,
1894 (gdb_byte *) buf + xfered,
1895 offset + xfered, len - xfered,
1896 &xfered_len);
1897
1898 /* Call an observer, notifying them of the xfer progress? */
1899 if (status == TARGET_XFER_EOF)
1900 return xfered;
1901 else if (status == TARGET_XFER_OK)
1902 {
1903 xfered += xfered_len;
1904 QUIT;
1905 }
1906 else
1907 return -1;
1908
1909 }
1910 return len;
1911 }
1912
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning, or end, nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  Explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  The readable edge decides
     the search direction (FORWARD).  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Binary search for the readable/unreadable boundary.  Loop
     invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* The "first half" is always the half adjacent to the known
	 readable edge: the lower half when scanning forward, the
	 upper half when scanning backward.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the next
	     iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read; the block
	 takes ownership of BUF directly.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read into the tail of
	 BUF; copy it out so the block owns exactly its own bytes.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2047
2048 void
2049 free_memory_read_result_vector (void *x)
2050 {
2051 VEC(memory_read_result_s) *v = x;
2052 memory_read_result_s *current;
2053 int ix;
2054
2055 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2056 {
2057 xfree (current->data);
2058 }
2059 VEC_free (memory_read_result_s, v);
2060 }
2061
/* Read LEN bytes starting at OFFSET, tolerating unreadable holes.
   Returns a vector with one memory_read_result_s per contiguous block
   actually read; the caller owns the vector and each block's data
   (see free_memory_read_result_vector).  */

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      /* region->hi == 0 means there is no upper bound.
	 NOTE(review): the else branch computes REGION->HI - OFFSET
	 rather than REGION->HI - (OFFSET + XFERED); once XFERED > 0
	 this overstates the room left in the region -- confirm against
	 lookup_mem_region's contract before changing.  */
      if (region->hi == 0)
	rlen = len - xfered;
      else
	rlen = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered += rlen;
	}
      else
	{
	  LONGEST to_read = min (len - xfered, rlen);
	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
				      (gdb_byte *) buffer,
				      offset + xfered, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfer <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  */
	      xfree (buffer);
	      read_whatever_is_readable (ops, offset + xfered,
					 offset + xfered + to_read, &result);
	      xfered += to_read;
	    }
	  else
	    {
	      /* Full or partial success; BUFFER's ownership passes to
		 the result vector element.  */
	      struct memory_read_result r;
	      r.data = buffer;
	      r.begin = offset + xfered;
	      r.end = r.begin + xfer;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered += xfer;
	    }
	  QUIT;
	}
    }
  return result;
}
2120
2121
2122 /* An alternative to target_write with progress callbacks. */
2123
2124 LONGEST
2125 target_write_with_progress (struct target_ops *ops,
2126 enum target_object object,
2127 const char *annex, const gdb_byte *buf,
2128 ULONGEST offset, LONGEST len,
2129 void (*progress) (ULONGEST, void *), void *baton)
2130 {
2131 LONGEST xfered = 0;
2132
2133 /* Give the progress callback a chance to set up. */
2134 if (progress)
2135 (*progress) (0, baton);
2136
2137 while (xfered < len)
2138 {
2139 ULONGEST xfered_len;
2140 enum target_xfer_status status;
2141
2142 status = target_write_partial (ops, object, annex,
2143 (gdb_byte *) buf + xfered,
2144 offset + xfered, len - xfered,
2145 &xfered_len);
2146
2147 if (status == TARGET_XFER_EOF)
2148 return xfered;
2149 if (TARGET_XFER_STATUS_ERROR_P (status))
2150 return -1;
2151
2152 gdb_assert (status == TARGET_XFER_OK);
2153 if (progress)
2154 (*progress) (xfered_len, baton);
2155
2156 xfered += xfered_len;
2157 QUIT;
2158 }
2159 return len;
2160 }
2161
/* For docs on target_write see target.h.  Thin wrapper around
   target_write_with_progress with no progress callback.  */

LONGEST
target_write (struct target_ops *ops,
	      enum target_object object,
	      const char *annex, const gdb_byte *buf,
	      ULONGEST offset, LONGEST len)
{
  return target_write_with_progress (ops, object, annex, buf, offset, len,
				     NULL, NULL);
}
2173
2174 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2175 the size of the transferred data. PADDING additional bytes are
2176 available in *BUF_P. This is a helper function for
2177 target_read_alloc; see the declaration of that function for more
2178 information. */
2179
2180 static LONGEST
2181 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2182 const char *annex, gdb_byte **buf_p, int padding)
2183 {
2184 size_t buf_alloc, buf_pos;
2185 gdb_byte *buf;
2186
2187 /* This function does not have a length parameter; it reads the
2188 entire OBJECT). Also, it doesn't support objects fetched partly
2189 from one target and partly from another (in a different stratum,
2190 e.g. a core file and an executable). Both reasons make it
2191 unsuitable for reading memory. */
2192 gdb_assert (object != TARGET_OBJECT_MEMORY);
2193
2194 /* Start by reading up to 4K at a time. The target will throttle
2195 this number down if necessary. */
2196 buf_alloc = 4096;
2197 buf = xmalloc (buf_alloc);
2198 buf_pos = 0;
2199 while (1)
2200 {
2201 ULONGEST xfered_len;
2202 enum target_xfer_status status;
2203
2204 status = target_read_partial (ops, object, annex, &buf[buf_pos],
2205 buf_pos, buf_alloc - buf_pos - padding,
2206 &xfered_len);
2207
2208 if (status == TARGET_XFER_EOF)
2209 {
2210 /* Read all there was. */
2211 if (buf_pos == 0)
2212 xfree (buf);
2213 else
2214 *buf_p = buf;
2215 return buf_pos;
2216 }
2217 else if (status != TARGET_XFER_OK)
2218 {
2219 /* An error occurred. */
2220 xfree (buf);
2221 return TARGET_XFER_E_IO;
2222 }
2223
2224 buf_pos += xfered_len;
2225
2226 /* If the buffer is filling up, expand it. */
2227 if (buf_alloc < buf_pos * 2)
2228 {
2229 buf_alloc *= 2;
2230 buf = xrealloc (buf, buf_alloc);
2231 }
2232
2233 QUIT;
2234 }
2235 }
2236
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   for more information about the return value.  */

LONGEST
target_read_alloc (struct target_ops *ops, enum target_object object,
		   const char *annex, gdb_byte **buf_p)
{
  return target_read_alloc_1 (ops, object, annex, buf_p, 0);
}
2247
2248 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2249 returned as a string, allocated using xmalloc. If an error occurs
2250 or the transfer is unsupported, NULL is returned. Empty objects
2251 are returned as allocated but empty strings. A warning is issued
2252 if the result contains any embedded NUL bytes. */
2253
2254 char *
2255 target_read_stralloc (struct target_ops *ops, enum target_object object,
2256 const char *annex)
2257 {
2258 gdb_byte *buffer;
2259 char *bufstr;
2260 LONGEST i, transferred;
2261
2262 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2263 bufstr = (char *) buffer;
2264
2265 if (transferred < 0)
2266 return NULL;
2267
2268 if (transferred == 0)
2269 return xstrdup ("");
2270
2271 bufstr[transferred] = 0;
2272
2273 /* Check for embedded NUL bytes; but allow trailing NULs. */
2274 for (i = strlen (bufstr); i < transferred; i++)
2275 if (bufstr[i] != 0)
2276 {
2277 warning (_("target object %d, annex %s, "
2278 "contained unexpected null characters"),
2279 (int) object, annex ? annex : "(none)");
2280 break;
2281 }
2282
2283 return bufstr;
2284 }
2285
2286 /* Memory transfer methods. */
2287
2288 void
2289 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2290 LONGEST len)
2291 {
2292 /* This method is used to read from an alternate, non-current
2293 target. This read must bypass the overlay support (as symbols
2294 don't match this target), and GDB's internal cache (wrong cache
2295 for this target). */
2296 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2297 != len)
2298 memory_error (TARGET_XFER_E_IO, addr);
2299 }
2300
/* Read a LEN-byte unsigned integer at ADDR from target OPS, decoding
   it with BYTE_ORDER.  LEN must fit in a ULONGEST; raises a memory
   error (via get_target_memory) if the read fails.  */

ULONGEST
get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
			    int len, enum bfd_endian byte_order)
{
  gdb_byte buf[sizeof (ULONGEST)];

  gdb_assert (len <= sizeof (buf));
  get_target_memory (ops, addr, buf, len);
  return extract_unsigned_integer (buf, len, byte_order);
}
2311
/* See target.h.  Insert the breakpoint described by BP_TGT via the
   current target, unless breakpoint insertion has been disallowed by
   the user, in which case warn and report failure.  */

int
target_insert_breakpoint (struct gdbarch *gdbarch,
			  struct bp_target_info *bp_tgt)
{
  /* Honor the "may-insert-breakpoints" permission: a non-zero return
     signals failure to the caller without touching the inferior.  */
  if (!may_insert_breakpoints)
    {
      warning (_("May not insert breakpoints"));
      return 1;
    }

  return current_target.to_insert_breakpoint (&current_target,
					      gdbarch, bp_tgt);
}
2327
/* See target.h.  Remove the breakpoint described by BP_TGT via the
   current target, honoring the user's insertion permission.  */

int
target_remove_breakpoint (struct gdbarch *gdbarch,
			  struct bp_target_info *bp_tgt)
{
  /* This is kind of a weird case to handle, but the permission might
     have been changed after breakpoints were inserted - in which case
     we should just take the user literally and assume that any
     breakpoints should be left in place.  */
  if (!may_insert_breakpoints)
    {
      warning (_("May not remove breakpoints"));
      return 1;
    }

  return current_target.to_remove_breakpoint (&current_target,
					      gdbarch, bp_tgt);
}
2347
2348 static void
2349 target_info (char *args, int from_tty)
2350 {
2351 struct target_ops *t;
2352 int has_all_mem = 0;
2353
2354 if (symfile_objfile != NULL)
2355 printf_unfiltered (_("Symbols from \"%s\".\n"),
2356 objfile_name (symfile_objfile));
2357
2358 for (t = target_stack; t != NULL; t = t->beneath)
2359 {
2360 if (!(*t->to_has_memory) (t))
2361 continue;
2362
2363 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2364 continue;
2365 if (has_all_mem)
2366 printf_unfiltered (_("\tWhile running this, "
2367 "GDB does not access memory from...\n"));
2368 printf_unfiltered ("%s:\n", t->to_longname);
2369 (t->to_files_info) (t);
2370 has_all_mem = (*t->to_has_all_memory) (t);
2371 }
2372 }
2373
/* This function is called before any new inferior is created, e.g.
   by running a program, attaching, or connecting to a target.
   It cleans up any state from previous invocations which might
   change between runs.  This is a subset of what target_preopen
   resets (things which might change between targets).  */

void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* Tracepoint agent capabilities must be re-probed for the new
     inferior.  */
  agent_capability_invalidate ();
}
2414
2415 /* Callback for iterate_over_inferiors. Gets rid of the given
2416 inferior. */
2417
2418 static int
2419 dispose_inferior (struct inferior *inf, void *args)
2420 {
2421 struct thread_info *thread;
2422
2423 thread = any_thread_of_process (inf->pid);
2424 if (thread)
2425 {
2426 switch_to_thread (thread->ptid);
2427
2428 /* Core inferiors actually should be detached, not killed. */
2429 if (target_has_execution)
2430 target_kill ();
2431 else
2432 target_detach (NULL, 0);
2433 }
2434
2435 return 0;
2436 }
2437
/* This is to be called by the open routine before it does
   anything.  Disposes of existing inferiors (querying the user first
   when appropriate), pops stale targets, and resets per-inferior
   state via target_pre_inferior.  */

void
target_preopen (int from_tty)
{
  dont_repeat ();

  if (have_inferiors ())
    {
      /* Only query when interactive AND something is actually live;
	 otherwise dispose silently.  */
      if (!from_tty
	  || !have_live_inferiors ()
	  || query (_("A program is being debugged already.  Kill it? ")))
	iterate_over_inferiors (dispose_inferior, NULL);
      else
	error (_("Program not killed."));
    }

  /* Calling target_kill may remove the target from the stack.  But if
     it doesn't (which seems like a win for UDI), remove it now.  */
  /* Leave the exec target, though.  The user may be switching from a
     live process to a core of the same program.  */
  pop_all_targets_above (file_stratum);

  target_pre_inferior (from_tty);
}
2464
2465 /* Detach a target after doing deferred register stores. */
2466
2467 void
2468 target_detach (const char *args, int from_tty)
2469 {
2470 struct target_ops* t;
2471
2472 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2473 /* Don't remove global breakpoints here. They're removed on
2474 disconnection from the target. */
2475 ;
2476 else
2477 /* If we're in breakpoints-always-inserted mode, have to remove
2478 them before detaching. */
2479 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2480
2481 prepare_for_detach ();
2482
2483 current_target.to_detach (&current_target, args, from_tty);
2484 if (targetdebug)
2485 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2486 args, from_tty);
2487 }
2488
2489 void
2490 target_disconnect (char *args, int from_tty)
2491 {
2492 struct target_ops *t;
2493
2494 /* If we're in breakpoints-always-inserted mode or if breakpoints
2495 are global across processes, we have to remove them before
2496 disconnecting. */
2497 remove_breakpoints ();
2498
2499 for (t = current_target.beneath; t != NULL; t = t->beneath)
2500 if (t->to_disconnect != NULL)
2501 {
2502 if (targetdebug)
2503 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2504 args, from_tty);
2505 t->to_disconnect (t, args, from_tty);
2506 return;
2507 }
2508
2509 tcomplain ();
2510 }
2511
2512 ptid_t
2513 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2514 {
2515 struct target_ops *t;
2516 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2517 status, options);
2518
2519 if (targetdebug)
2520 {
2521 char *status_string;
2522 char *options_string;
2523
2524 status_string = target_waitstatus_to_string (status);
2525 options_string = target_options_to_string (options);
2526 fprintf_unfiltered (gdb_stdlog,
2527 "target_wait (%d, status, options={%s})"
2528 " = %d, %s\n",
2529 ptid_get_pid (ptid), options_string,
2530 ptid_get_pid (retval), status_string);
2531 xfree (status_string);
2532 xfree (options_string);
2533 }
2534
2535 return retval;
2536 }
2537
2538 char *
2539 target_pid_to_str (ptid_t ptid)
2540 {
2541 struct target_ops *t;
2542
2543 for (t = current_target.beneath; t != NULL; t = t->beneath)
2544 {
2545 if (t->to_pid_to_str != NULL)
2546 return (*t->to_pid_to_str) (t, ptid);
2547 }
2548
2549 return normal_pid_to_str (ptid);
2550 }
2551
/* Return the name of the thread described by INFO, as provided by the
   current target's to_thread_name method.  NOTE(review): ownership of
   the returned string is determined by the target method — confirm
   against target.h before freeing.  */

char *
target_thread_name (struct thread_info *info)
{
  return current_target.to_thread_name (&current_target, info);
}
2557
2558 void
2559 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2560 {
2561 struct target_ops *t;
2562
2563 target_dcache_invalidate ();
2564
2565 current_target.to_resume (&current_target, ptid, step, signal);
2566 if (targetdebug)
2567 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2568 ptid_get_pid (ptid),
2569 step ? "step" : "continue",
2570 gdb_signal_to_name (signal));
2571
2572 registers_changed_ptid (ptid);
2573 set_executing (ptid, 1);
2574 set_running (ptid, 1);
2575 clear_inline_frame_state (ptid);
2576 }
2577
2578 void
2579 target_pass_signals (int numsigs, unsigned char *pass_signals)
2580 {
2581 struct target_ops *t;
2582
2583 for (t = current_target.beneath; t != NULL; t = t->beneath)
2584 {
2585 if (t->to_pass_signals != NULL)
2586 {
2587 if (targetdebug)
2588 {
2589 int i;
2590
2591 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2592 numsigs);
2593
2594 for (i = 0; i < numsigs; i++)
2595 if (pass_signals[i])
2596 fprintf_unfiltered (gdb_stdlog, " %s",
2597 gdb_signal_to_name (i));
2598
2599 fprintf_unfiltered (gdb_stdlog, " })\n");
2600 }
2601
2602 (*t->to_pass_signals) (t, numsigs, pass_signals);
2603 return;
2604 }
2605 }
2606 }
2607
2608 void
2609 target_program_signals (int numsigs, unsigned char *program_signals)
2610 {
2611 struct target_ops *t;
2612
2613 for (t = current_target.beneath; t != NULL; t = t->beneath)
2614 {
2615 if (t->to_program_signals != NULL)
2616 {
2617 if (targetdebug)
2618 {
2619 int i;
2620
2621 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2622 numsigs);
2623
2624 for (i = 0; i < numsigs; i++)
2625 if (program_signals[i])
2626 fprintf_unfiltered (gdb_stdlog, " %s",
2627 gdb_signal_to_name (i));
2628
2629 fprintf_unfiltered (gdb_stdlog, " })\n");
2630 }
2631
2632 (*t->to_program_signals) (t, numsigs, program_signals);
2633 return;
2634 }
2635 }
2636 }
2637
/* Default implementation of to_follow_fork.  Reaching this means a
   target reported a fork event but provides no way to follow it — an
   internal inconsistency — so raise an internal error.  Never
   returns normally.  */

static int
default_follow_fork (struct target_ops *self, int follow_child,
		     int detach_fork)
{
  /* Some target returned a fork event, but did not know how to follow it.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to follow fork"));
}
2646
/* Look through the list of possible targets for a target that can
   follow forks.  FOLLOW_CHILD selects which process to stay attached
   to; DETACH_FORK controls whether the other one is detached.
   Returns the value of the target's to_follow_fork method.  */

int
target_follow_fork (int follow_child, int detach_fork)
{
  int retval = current_target.to_follow_fork (&current_target,
					      follow_child, detach_fork);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
			"target_follow_fork (%d, %d) = %d\n",
			follow_child, detach_fork, retval);
  return retval;
}
2662
2663 void
2664 target_mourn_inferior (void)
2665 {
2666 struct target_ops *t;
2667
2668 for (t = current_target.beneath; t != NULL; t = t->beneath)
2669 {
2670 if (t->to_mourn_inferior != NULL)
2671 {
2672 t->to_mourn_inferior (t);
2673 if (targetdebug)
2674 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2675
2676 /* We no longer need to keep handles on any of the object files.
2677 Make sure to release them to avoid unnecessarily locking any
2678 of them while we're not actually debugging. */
2679 bfd_cache_close_all ();
2680
2681 return;
2682 }
2683 }
2684
2685 internal_error (__FILE__, __LINE__,
2686 _("could not find a target to follow mourn inferior"));
2687 }
2688
2689 /* Look for a target which can describe architectural features, starting
2690 from TARGET. If we find one, return its description. */
2691
2692 const struct target_desc *
2693 target_read_description (struct target_ops *target)
2694 {
2695 struct target_ops *t;
2696
2697 for (t = target; t != NULL; t = t->beneath)
2698 if (t->to_read_description != NULL)
2699 {
2700 const struct target_desc *tdesc;
2701
2702 tdesc = t->to_read_description (t);
2703 if (tdesc)
2704 return tdesc;
2705 }
2706
2707 return NULL;
2708 }
2709
/* The default implementation of to_search_memory.
   This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search on the
   target side with, for example, gdbserver).  Searches
   SEARCH_SPACE_LEN bytes from START_ADDR for PATTERN (PATTERN_LEN
   bytes); on a hit, stores the address in *FOUND_ADDRP.  Returns 1 if
   found, 0 if not, -1 on a memory read error.  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* Over-allocate by pattern_len - 1 so a match straddling two chunks
     is still found within one buffer scan.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* Plain malloc (not xmalloc) so an oversized request fails softly.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2817
2818 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2819 sequence of bytes in PATTERN with length PATTERN_LEN.
2820
2821 The result is 1 if found, 0 if not found, and -1 if there was an error
2822 requiring halting of the search (e.g. memory read error).
2823 If the pattern is found the address is recorded in FOUND_ADDRP. */
2824
2825 int
2826 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2827 const gdb_byte *pattern, ULONGEST pattern_len,
2828 CORE_ADDR *found_addrp)
2829 {
2830 struct target_ops *t;
2831 int found;
2832
2833 /* We don't use INHERIT to set current_target.to_search_memory,
2834 so we have to scan the target stack and handle targetdebug
2835 ourselves. */
2836
2837 if (targetdebug)
2838 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2839 hex_string (start_addr));
2840
2841 for (t = current_target.beneath; t != NULL; t = t->beneath)
2842 if (t->to_search_memory != NULL)
2843 break;
2844
2845 if (t != NULL)
2846 {
2847 found = t->to_search_memory (t, start_addr, search_space_len,
2848 pattern, pattern_len, found_addrp);
2849 }
2850 else
2851 {
2852 /* If a special version of to_search_memory isn't available, use the
2853 simple version. */
2854 found = simple_search_memory (current_target.beneath,
2855 start_addr, search_space_len,
2856 pattern, pattern_len, found_addrp);
2857 }
2858
2859 if (targetdebug)
2860 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2861
2862 return found;
2863 }
2864
2865 /* Look through the currently pushed targets. If none of them will
2866 be able to restart the currently running process, issue an error
2867 message. */
2868
2869 void
2870 target_require_runnable (void)
2871 {
2872 struct target_ops *t;
2873
2874 for (t = target_stack; t != NULL; t = t->beneath)
2875 {
2876 /* If this target knows how to create a new program, then
2877 assume we will still be able to after killing the current
2878 one. Either killing and mourning will not pop T, or else
2879 find_default_run_target will find it again. */
2880 if (t->to_create_inferior != NULL)
2881 return;
2882
2883 /* Do not worry about thread_stratum targets that can not
2884 create inferiors. Assume they will be pushed again if
2885 necessary, and continue to the process_stratum. */
2886 if (t->to_stratum == thread_stratum
2887 || t->to_stratum == arch_stratum)
2888 continue;
2889
2890 error (_("The \"%s\" target does not support \"run\". "
2891 "Try \"help target\" or \"continue\"."),
2892 t->to_shortname);
2893 }
2894
2895 /* This function is only called if the target is running. In that
2896 case there should have been a process_stratum target and it
2897 should either know how to create inferiors, or not... */
2898 internal_error (__FILE__, __LINE__, _("No targets found"));
2899 }
2900
2901 /* Look through the list of possible targets for a target that can
2902 execute a run or attach command without any other data. This is
2903 used to locate the default process stratum.
2904
2905 If DO_MESG is not NULL, the result is always valid (error() is
2906 called for errors); else, return NULL on error. */
2907
2908 static struct target_ops *
2909 find_default_run_target (char *do_mesg)
2910 {
2911 struct target_ops **t;
2912 struct target_ops *runable = NULL;
2913 int count;
2914
2915 count = 0;
2916
2917 for (t = target_structs; t < target_structs + target_struct_size;
2918 ++t)
2919 {
2920 if ((*t)->to_can_run && target_can_run (*t))
2921 {
2922 runable = *t;
2923 ++count;
2924 }
2925 }
2926
2927 if (count != 1)
2928 {
2929 if (do_mesg)
2930 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2931 else
2932 return NULL;
2933 }
2934
2935 return runable;
2936 }
2937
2938 void
2939 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2940 {
2941 struct target_ops *t;
2942
2943 t = find_default_run_target ("attach");
2944 (t->to_attach) (t, args, from_tty);
2945 return;
2946 }
2947
2948 void
2949 find_default_create_inferior (struct target_ops *ops,
2950 char *exec_file, char *allargs, char **env,
2951 int from_tty)
2952 {
2953 struct target_ops *t;
2954
2955 t = find_default_run_target ("run");
2956 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2957 return;
2958 }
2959
/* Default to_can_async_p implementation: consult the default run
   target's own method, unless it is itself the delegator.  */

static int
find_default_can_async_p (struct target_ops *ignore)
{
  struct target_ops *t;

  /* This may be called before the target is pushed on the stack;
     look for the default process stratum.  If there's none, gdb isn't
     configured with a native debugger, and target remote isn't
     connected yet.  */
  t = find_default_run_target (NULL);
  if (t && t->to_can_async_p != delegate_can_async_p)
    return (t->to_can_async_p) (t);
  return 0;
}
2974
/* Default to_is_async_p implementation: consult the default run
   target's own method, unless it is itself the delegator.  */

static int
find_default_is_async_p (struct target_ops *ignore)
{
  struct target_ops *t;

  /* This may be called before the target is pushed on the stack;
     look for the default process stratum.  If there's none, gdb isn't
     configured with a native debugger, and target remote isn't
     connected yet.  */
  t = find_default_run_target (NULL);
  if (t && t->to_is_async_p != delegate_is_async_p)
    return (t->to_is_async_p) (t);
  return 0;
}
2989
/* Default to_supports_non_stop implementation: ask the default run
   target, or report no support when there is none.  */

static int
find_default_supports_non_stop (struct target_ops *self)
{
  struct target_ops *t;

  t = find_default_run_target (NULL);
  if (t && t->to_supports_non_stop)
    return (t->to_supports_non_stop) (t);
  return 0;
}
3000
3001 int
3002 target_supports_non_stop (void)
3003 {
3004 struct target_ops *t;
3005
3006 for (t = &current_target; t != NULL; t = t->beneath)
3007 if (t->to_supports_non_stop)
3008 return t->to_supports_non_stop (t);
3009
3010 return 0;
3011 }
3012
/* Implement the "info proc" command.  ARGS and WHAT select which
   process information to display.  Returns 1 if some target handled
   the request, 0 if none could.  */

int
target_info_proc (char *args, enum info_proc_what what)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  if (current_target.to_stratum >= process_stratum)
    t = current_target.beneath;
  else
    t = find_default_run_target (NULL);

  /* Delegate to the first target implementing to_info_proc.  */
  for (; t != NULL; t = t->beneath)
    {
      if (t->to_info_proc != NULL)
	{
	  t->to_info_proc (t, args, what);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_info_proc (\"%s\", %d)\n", args, what);

	  return 1;
	}
    }

  return 0;
}
3044
/* Default to_supports_disable_randomization implementation: ask the
   default run target, or report no support when there is none.  */

static int
find_default_supports_disable_randomization (struct target_ops *self)
{
  struct target_ops *t;

  t = find_default_run_target (NULL);
  if (t && t->to_supports_disable_randomization)
    return (t->to_supports_disable_randomization) (t);
  return 0;
}
3055
3056 int
3057 target_supports_disable_randomization (void)
3058 {
3059 struct target_ops *t;
3060
3061 for (t = &current_target; t != NULL; t = t->beneath)
3062 if (t->to_supports_disable_randomization)
3063 return t->to_supports_disable_randomization (t);
3064
3065 return 0;
3066 }
3067
3068 char *
3069 target_get_osdata (const char *type)
3070 {
3071 struct target_ops *t;
3072
3073 /* If we're already connected to something that can get us OS
3074 related data, use it. Otherwise, try using the native
3075 target. */
3076 if (current_target.to_stratum >= process_stratum)
3077 t = current_target.beneath;
3078 else
3079 t = find_default_run_target ("get OS data");
3080
3081 if (!t)
3082 return NULL;
3083
3084 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3085 }
3086
/* Determine the current address space of thread PTID.  Delegates to
   the first target implementing to_thread_address_space; otherwise
   falls back to the "main" address space of PTID's inferior.  It is
   an internal error if neither is available.  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
	{
	  aspace = t->to_thread_address_space (t, ptid);
	  /* A target implementing the method must produce a space.  */
	  gdb_assert (aspace);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_thread_address_space (%s) = %d\n",
				target_pid_to_str (ptid),
				address_space_num (aspace));
	  return aspace;
	}
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}
3123
3124
3125 /* Target file operations. */
3126
3127 static struct target_ops *
3128 default_fileio_target (void)
3129 {
3130 /* If we're already connected to something that can perform
3131 file I/O, use it. Otherwise, try using the native target. */
3132 if (current_target.to_stratum >= process_stratum)
3133 return current_target.beneath;
3134 else
3135 return find_default_run_target ("file I/O");
3136 }
3137
/* Open FILENAME on the target, using FLAGS and MODE.  Return a
   target file descriptor, or -1 if an error occurs (and set
   *TARGET_ERRNO).  Delegates to the first target implementing
   to_fileio_open; sets FILEIO_ENOSYS if none does.  */
int
target_fileio_open (const char *filename, int flags, int mode,
		    int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_open != NULL)
	{
	  int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
				filename, flags, mode,
				fd, fd != -1 ? 0 : *target_errno);
	  return fd;
	}
    }

  /* No target on the stack supports file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3165
/* Write up to LEN bytes from WRITE_BUF to FD on the target at OFFSET.
   Return the number of bytes written, or -1 if an error occurs
   (and set *TARGET_ERRNO).  Delegates to the first target implementing
   to_fileio_pwrite; sets FILEIO_ENOSYS if none does.  */
int
target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
		      ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pwrite != NULL)
	{
	  int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
					 target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pwrite (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target on the stack supports file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3195
/* Read up to LEN bytes from FD on the target at OFFSET into READ_BUF.
   Return the number of bytes read, or -1 if an error occurs
   (and set *TARGET_ERRNO).  Delegates to the first target implementing
   to_fileio_pread; sets FILEIO_ENOSYS if none does.  */
int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
		     ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pread != NULL)
	{
	  int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
					target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pread (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target on the stack supports file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3225
3226 /* Close FD on the target. Return 0, or -1 if an error occurs
3227 (and set *TARGET_ERRNO). */
3228 int
3229 target_fileio_close (int fd, int *target_errno)
3230 {
3231 struct target_ops *t;
3232
3233 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3234 {
3235 if (t->to_fileio_close != NULL)
3236 {
3237 int ret = t->to_fileio_close (t, fd, target_errno);
3238
3239 if (targetdebug)
3240 fprintf_unfiltered (gdb_stdlog,
3241 "target_fileio_close (%d) = %d (%d)\n",
3242 fd, ret, ret != -1 ? 0 : *target_errno);
3243 return ret;
3244 }
3245 }
3246
3247 *target_errno = FILEIO_ENOSYS;
3248 return -1;
3249 }
3250
3251 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3252 occurs (and set *TARGET_ERRNO). */
3253 int
3254 target_fileio_unlink (const char *filename, int *target_errno)
3255 {
3256 struct target_ops *t;
3257
3258 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3259 {
3260 if (t->to_fileio_unlink != NULL)
3261 {
3262 int ret = t->to_fileio_unlink (t, filename, target_errno);
3263
3264 if (targetdebug)
3265 fprintf_unfiltered (gdb_stdlog,
3266 "target_fileio_unlink (%s) = %d (%d)\n",
3267 filename, ret, ret != -1 ? 0 : *target_errno);
3268 return ret;
3269 }
3270 }
3271
3272 *target_errno = FILEIO_ENOSYS;
3273 return -1;
3274 }
3275
3276 /* Read value of symbolic link FILENAME on the target. Return a
3277 null-terminated string allocated via xmalloc, or NULL if an error
3278 occurs (and set *TARGET_ERRNO). */
3279 char *
3280 target_fileio_readlink (const char *filename, int *target_errno)
3281 {
3282 struct target_ops *t;
3283
3284 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3285 {
3286 if (t->to_fileio_readlink != NULL)
3287 {
3288 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3289
3290 if (targetdebug)
3291 fprintf_unfiltered (gdb_stdlog,
3292 "target_fileio_readlink (%s) = %s (%d)\n",
3293 filename, ret? ret : "(nil)",
3294 ret? 0 : *target_errno);
3295 return ret;
3296 }
3297 }
3298
3299 *target_errno = FILEIO_ENOSYS;
3300 return NULL;
3301 }
3302
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE.  Any close error is deliberately discarded (best effort).  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int *fd_ptr = (int *) opaque;
  int ignored_errno;

  target_fileio_close (*fd_ptr, &ignored_errno);
}
3311
3312 /* Read target file FILENAME. Store the result in *BUF_P and
3313 return the size of the transferred data. PADDING additional bytes are
3314 available in *BUF_P. This is a helper function for
3315 target_fileio_read_alloc; see the declaration of that function for more
3316 information. */
3317
static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Ensure FD is closed on every exit path below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Read into the tail of BUF, always keeping PADDING spare bytes
	 at the end for the caller (e.g. for a NUL terminator).  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  NOTE: when the file is empty
	     (buf_pos == 0), *BUF_P is left unset and 0 is returned;
	     callers must not use *BUF_P in that case.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  Doubling keeps the
	 number of reallocations logarithmic in the file size.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      /* Allow the user to interrupt a long read.  */
      QUIT;
    }
}
3375
3376 /* Read target file FILENAME. Store the result in *BUF_P and return
3377 the size of the transferred data. See the declaration in "target.h"
3378 function for more information about the return value. */
3379
LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* No padding requested.  NOTE: for an empty file the helper returns
     0 without setting *BUF_P.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3385
3386 /* Read target file FILENAME. The result is NUL-terminated and
3387 returned as a string, allocated using xmalloc. If an error occurs
3388 or the transfer is unsupported, NULL is returned. Empty objects
3389 are returned as allocated but empty strings. A warning is issued
3390 if the result contains any embedded NUL bytes. */
3391
3392 char *
3393 target_fileio_read_stralloc (const char *filename)
3394 {
3395 gdb_byte *buffer;
3396 char *bufstr;
3397 LONGEST i, transferred;
3398
3399 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3400 bufstr = (char *) buffer;
3401
3402 if (transferred < 0)
3403 return NULL;
3404
3405 if (transferred == 0)
3406 return xstrdup ("");
3407
3408 bufstr[transferred] = 0;
3409
3410 /* Check for embedded NUL bytes; but allow trailing NULs. */
3411 for (i = strlen (bufstr); i < transferred; i++)
3412 if (bufstr[i] != 0)
3413 {
3414 warning (_("target file %s "
3415 "contained unexpected null characters"),
3416 filename);
3417 break;
3418 }
3419
3420 return bufstr;
3421 }
3422
3423
3424 static int
3425 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3426 CORE_ADDR addr, int len)
3427 {
3428 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3429 }
3430
3431 static int
3432 default_watchpoint_addr_within_range (struct target_ops *target,
3433 CORE_ADDR addr,
3434 CORE_ADDR start, int length)
3435 {
3436 return addr >= start && addr < start + length;
3437 }
3438
/* Default to_thread_architecture: every thread is assumed to use the
   current target's architecture regardless of PTID.  */
static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}
3444
/* Stub used for several dummy-target predicates; always answers 0.
   (It is installed in init_dummy_target through function-pointer
   casts.)  */

static int
return_zero (void)
{
  const int nothing = 0;

  return nothing;
}
3450
3451 /*
3452 * Find the next target down the stack from the specified target.
3453 */
3454
struct target_ops *
find_target_beneath (struct target_ops *t)
{
  /* Simply follow the stack link; may be NULL at the bottom.  */
  return t->beneath;
}
3460
3461 /* See target.h. */
3462
3463 struct target_ops *
3464 find_target_at (enum strata stratum)
3465 {
3466 struct target_ops *t;
3467
3468 for (t = current_target.beneath; t != NULL; t = t->beneath)
3469 if (t->to_stratum == stratum)
3470 return t;
3471
3472 return NULL;
3473 }
3474
3475 \f
3476 /* The inferior process has died. Long live the inferior! */
3477
void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Save and clear inferior_ptid first, so code reached below never
     sees a stale "current" thread.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  /* Register contents are no longer valid.  */
  registers_changed ();

  /* The executable may have changed on disk; re-open it and discard
     cached frames.  */
  reopen_exec_file ();
  reinit_frame_cache ();

  /* Give the (deprecated) UI hook a chance to react.  */
  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3510 \f
3511 /* Convert a normal process ID to a string. Returns the string in a
3512 static buffer. */
3513
3514 char *
3515 normal_pid_to_str (ptid_t ptid)
3516 {
3517 static char buf[32];
3518
3519 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3520 return buf;
3521 }
3522
/* to_pid_to_str for the dummy target: just the generic "process N"
   rendering.  */
static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3528
3529 /* Error-catcher for target_find_memory_regions. */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  /* error throws, so the return below is never reached; it only
     satisfies the signature.  */
  error (_("Command not implemented for this target."));
  return 0;
}
3537
3538 /* Error-catcher for target_make_corefile_notes. */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  /* error throws, so the return below is never reached; it only
     satisfies the signature.  */
  error (_("Command not implemented for this target."));
  return NULL;
}
3546
3547 /* Set up the handful of non-empty slots needed by the dummy target
3548 vector. */
3549
static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  /* Creating an inferior from the dummy target falls back to the
     default "run" machinery.  */
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  /* NOTE(review): return_zero is declared as int (*) (void) but is
     installed -- and later called -- through differently-typed
     function pointers via these casts; this relies on the platform
     ABI tolerating extra ignored arguments.  Verify before changing
     return_zero's signature.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining slot with a default method.  */
  install_dummy_methods (&dummy_target);
}
3572 \f
/* Debug wrapper: forward to the real to_open, then log the call.  */
static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3580
3581 void
3582 target_close (struct target_ops *targ)
3583 {
3584 gdb_assert (!target_is_pushed (targ));
3585
3586 if (targ->to_xclose != NULL)
3587 targ->to_xclose (targ);
3588 else if (targ->to_close != NULL)
3589 targ->to_close (targ);
3590
3591 if (targetdebug)
3592 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3593 }
3594
/* Attach to a process; delegates to the current target's to_attach
   method and logs the call when target debugging is on.  */
void
target_attach (char *args, int from_tty)
{
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
			args, from_tty);
}
3603
3604 int
3605 target_thread_alive (ptid_t ptid)
3606 {
3607 struct target_ops *t;
3608
3609 for (t = current_target.beneath; t != NULL; t = t->beneath)
3610 {
3611 if (t->to_thread_alive != NULL)
3612 {
3613 int retval;
3614
3615 retval = t->to_thread_alive (t, ptid);
3616 if (targetdebug)
3617 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3618 ptid_get_pid (ptid), retval);
3619
3620 return retval;
3621 }
3622 }
3623
3624 return 0;
3625 }
3626
3627 void
3628 target_find_new_threads (void)
3629 {
3630 struct target_ops *t;
3631
3632 for (t = current_target.beneath; t != NULL; t = t->beneath)
3633 {
3634 if (t->to_find_new_threads != NULL)
3635 {
3636 t->to_find_new_threads (t);
3637 if (targetdebug)
3638 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3639
3640 return;
3641 }
3642 }
3643 }
3644
3645 void
3646 target_stop (ptid_t ptid)
3647 {
3648 if (!may_stop)
3649 {
3650 warning (_("May not interrupt or stop the target, ignoring attempt"));
3651 return;
3652 }
3653
3654 (*current_target.to_stop) (&current_target, ptid);
3655 }
3656
/* Debug wrapper: forward to the real to_post_attach, then log.  */
static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3664
3665 /* Concatenate ELEM to LIST, a comma separate list, and return the
3666 result. The LIST incoming argument is released. */
3667
3668 static char *
3669 str_comma_list_concat_elem (char *list, const char *elem)
3670 {
3671 if (list == NULL)
3672 return xstrdup (elem);
3673 else
3674 return reconcat (list, list, ", ", elem, (char *) NULL);
3675 }
3676
3677 /* Helper for target_options_to_string. If OPT is present in
3678 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3679 Returns the new resulting string. OPT is removed from
3680 TARGET_OPTIONS. */
3681
static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  int present = (*target_options & opt) != 0;

  if (present)
    {
      /* Consume the bit and append its name to the list.  */
      *target_options &= ~opt;
      ret = str_comma_list_concat_elem (ret, opt_str);
    }

  return ret;
}
3694
/* Render TARGET_OPTIONS (a bitmask of TARGET_* resume flags) as a
   comma-separated, xmalloc'd string; unknown bits are reported as
   "unknown???".  Caller frees the result.  */
char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

  /* Each invocation appends the option's name and clears its bit from
     the local TARGET_OPTIONS copy.  */
#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  /* Anything left set was not matched above.  */
  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3712
/* Log a register access for FUNC: register name (or number), raw
   bytes, and -- when it fits in a LONGEST -- the value in hex and
   decimal.  */
static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when one exists.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      /* Dump the raw bytes in storage order.  */
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* Only decode a scalar value when it fits.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3749
/* Fetch register REGNO (or all registers if REGNO is -1, by
   convention of to_fetch_registers implementations) into REGCACHE.  */
void
target_fetch_registers (struct regcache *regcache, int regno)
{
  current_target.to_fetch_registers (&current_target, regcache, regno);
  if (targetdebug)
    debug_print_register ("target_fetch_registers", regcache, regno);
}
3757
3758 void
3759 target_store_registers (struct regcache *regcache, int regno)
3760 {
3761 struct target_ops *t;
3762
3763 if (!may_write_registers)
3764 error (_("Writing to registers is not allowed (regno %d)"), regno);
3765
3766 current_target.to_store_registers (&current_target, regcache, regno);
3767 if (targetdebug)
3768 {
3769 debug_print_register ("target_store_registers", regcache, regno);
3770 }
3771 }
3772
3773 int
3774 target_core_of_thread (ptid_t ptid)
3775 {
3776 struct target_ops *t;
3777
3778 for (t = current_target.beneath; t != NULL; t = t->beneath)
3779 {
3780 if (t->to_core_of_thread != NULL)
3781 {
3782 int retval = t->to_core_of_thread (t, ptid);
3783
3784 if (targetdebug)
3785 fprintf_unfiltered (gdb_stdlog,
3786 "target_core_of_thread (%d) = %d\n",
3787 ptid_get_pid (ptid), retval);
3788 return retval;
3789 }
3790 }
3791
3792 return -1;
3793 }
3794
/* Compare SIZE bytes at MEMADDR in target memory against DATA;
   delegates to the first target implementing to_verify_memory.  */
int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_verify_memory != NULL)
	{
	  int retval = t->to_verify_memory (t, data, memaddr, size);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_verify_memory (%s, %s) = %d\n",
				paddress (target_gdbarch (), memaddr),
				pulongest (size),
				retval);
	  return retval;
	}
    }

  /* NOTE(review): tcomplain presumably throws (never returns), so
     control does not fall off the end of this non-void function --
     confirm tcomplain's noreturn behavior before relying on it.  */
  tcomplain ();
}
3818
3819 /* The documentation for this function is in its prototype declaration in
3820 target.h. */
3821
3822 int
3823 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3824 {
3825 int ret;
3826
3827 ret = current_target.to_insert_mask_watchpoint (&current_target,
3828 addr, mask, rw);
3829
3830 if (targetdebug)
3831 fprintf_unfiltered (gdb_stdlog, "\
3832 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3833 core_addr_to_string (addr),
3834 core_addr_to_string (mask), rw, ret);
3835
3836 return ret;
3837 }
3838
3839 /* The documentation for this function is in its prototype declaration in
3840 target.h. */
3841
3842 int
3843 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3844 {
3845 int ret;
3846
3847 ret = current_target.to_remove_mask_watchpoint (&current_target,
3848 addr, mask, rw);
3849
3850 if (targetdebug)
3851 fprintf_unfiltered (gdb_stdlog, "\
3852 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
3853 core_addr_to_string (addr),
3854 core_addr_to_string (mask), rw, ret);
3855
3856 return ret;
3857 }
3858
3859 /* The documentation for this function is in its prototype declaration
3860 in target.h. */
3861
/* Number of debug registers needed for a masked watchpoint at
   ADDR/MASK; straight delegation to the current target.  */
int
target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
{
  return current_target.to_masked_watch_num_registers (&current_target,
						       addr, mask);
}
3868
3869 /* The documentation for this function is in its prototype declaration
3870 in target.h. */
3871
/* Number of debug registers needed for a ranged breakpoint; straight
   delegation to the current target.  */
int
target_ranged_break_num_registers (void)
{
  return current_target.to_ranged_break_num_registers (&current_target);
}
3877
3878 /* See target.h. */
3879
3880 struct btrace_target_info *
3881 target_enable_btrace (ptid_t ptid)
3882 {
3883 struct target_ops *t;
3884
3885 for (t = current_target.beneath; t != NULL; t = t->beneath)
3886 if (t->to_enable_btrace != NULL)
3887 return t->to_enable_btrace (t, ptid);
3888
3889 tcomplain ();
3890 return NULL;
3891 }
3892
3893 /* See target.h. */
3894
3895 void
3896 target_disable_btrace (struct btrace_target_info *btinfo)
3897 {
3898 struct target_ops *t;
3899
3900 for (t = current_target.beneath; t != NULL; t = t->beneath)
3901 if (t->to_disable_btrace != NULL)
3902 {
3903 t->to_disable_btrace (t, btinfo);
3904 return;
3905 }
3906
3907 tcomplain ();
3908 }
3909
3910 /* See target.h. */
3911
3912 void
3913 target_teardown_btrace (struct btrace_target_info *btinfo)
3914 {
3915 struct target_ops *t;
3916
3917 for (t = current_target.beneath; t != NULL; t = t->beneath)
3918 if (t->to_teardown_btrace != NULL)
3919 {
3920 t->to_teardown_btrace (t, btinfo);
3921 return;
3922 }
3923
3924 tcomplain ();
3925 }
3926
3927 /* See target.h. */
3928
3929 enum btrace_error
3930 target_read_btrace (VEC (btrace_block_s) **btrace,
3931 struct btrace_target_info *btinfo,
3932 enum btrace_read_type type)
3933 {
3934 struct target_ops *t;
3935
3936 for (t = current_target.beneath; t != NULL; t = t->beneath)
3937 if (t->to_read_btrace != NULL)
3938 return t->to_read_btrace (t, btrace, btinfo, type);
3939
3940 tcomplain ();
3941 return BTRACE_ERR_NOT_SUPPORTED;
3942 }
3943
3944 /* See target.h. */
3945
3946 void
3947 target_stop_recording (void)
3948 {
3949 struct target_ops *t;
3950
3951 for (t = current_target.beneath; t != NULL; t = t->beneath)
3952 if (t->to_stop_recording != NULL)
3953 {
3954 t->to_stop_recording (t);
3955 return;
3956 }
3957
3958 /* This is optional. */
3959 }
3960
3961 /* See target.h. */
3962
3963 void
3964 target_info_record (void)
3965 {
3966 struct target_ops *t;
3967
3968 for (t = current_target.beneath; t != NULL; t = t->beneath)
3969 if (t->to_info_record != NULL)
3970 {
3971 t->to_info_record (t);
3972 return;
3973 }
3974
3975 tcomplain ();
3976 }
3977
3978 /* See target.h. */
3979
3980 void
3981 target_save_record (const char *filename)
3982 {
3983 struct target_ops *t;
3984
3985 for (t = current_target.beneath; t != NULL; t = t->beneath)
3986 if (t->to_save_record != NULL)
3987 {
3988 t->to_save_record (t, filename);
3989 return;
3990 }
3991
3992 tcomplain ();
3993 }
3994
3995 /* See target.h. */
3996
3997 int
3998 target_supports_delete_record (void)
3999 {
4000 struct target_ops *t;
4001
4002 for (t = current_target.beneath; t != NULL; t = t->beneath)
4003 if (t->to_delete_record != NULL)
4004 return 1;
4005
4006 return 0;
4007 }
4008
4009 /* See target.h. */
4010
4011 void
4012 target_delete_record (void)
4013 {
4014 struct target_ops *t;
4015
4016 for (t = current_target.beneath; t != NULL; t = t->beneath)
4017 if (t->to_delete_record != NULL)
4018 {
4019 t->to_delete_record (t);
4020 return;
4021 }
4022
4023 tcomplain ();
4024 }
4025
4026 /* See target.h. */
4027
4028 int
4029 target_record_is_replaying (void)
4030 {
4031 struct target_ops *t;
4032
4033 for (t = current_target.beneath; t != NULL; t = t->beneath)
4034 if (t->to_record_is_replaying != NULL)
4035 return t->to_record_is_replaying (t);
4036
4037 return 0;
4038 }
4039
4040 /* See target.h. */
4041
4042 void
4043 target_goto_record_begin (void)
4044 {
4045 struct target_ops *t;
4046
4047 for (t = current_target.beneath; t != NULL; t = t->beneath)
4048 if (t->to_goto_record_begin != NULL)
4049 {
4050 t->to_goto_record_begin (t);
4051 return;
4052 }
4053
4054 tcomplain ();
4055 }
4056
4057 /* See target.h. */
4058
4059 void
4060 target_goto_record_end (void)
4061 {
4062 struct target_ops *t;
4063
4064 for (t = current_target.beneath; t != NULL; t = t->beneath)
4065 if (t->to_goto_record_end != NULL)
4066 {
4067 t->to_goto_record_end (t);
4068 return;
4069 }
4070
4071 tcomplain ();
4072 }
4073
4074 /* See target.h. */
4075
4076 void
4077 target_goto_record (ULONGEST insn)
4078 {
4079 struct target_ops *t;
4080
4081 for (t = current_target.beneath; t != NULL; t = t->beneath)
4082 if (t->to_goto_record != NULL)
4083 {
4084 t->to_goto_record (t, insn);
4085 return;
4086 }
4087
4088 tcomplain ();
4089 }
4090
4091 /* See target.h. */
4092
4093 void
4094 target_insn_history (int size, int flags)
4095 {
4096 struct target_ops *t;
4097
4098 for (t = current_target.beneath; t != NULL; t = t->beneath)
4099 if (t->to_insn_history != NULL)
4100 {
4101 t->to_insn_history (t, size, flags);
4102 return;
4103 }
4104
4105 tcomplain ();
4106 }
4107
4108 /* See target.h. */
4109
4110 void
4111 target_insn_history_from (ULONGEST from, int size, int flags)
4112 {
4113 struct target_ops *t;
4114
4115 for (t = current_target.beneath; t != NULL; t = t->beneath)
4116 if (t->to_insn_history_from != NULL)
4117 {
4118 t->to_insn_history_from (t, from, size, flags);
4119 return;
4120 }
4121
4122 tcomplain ();
4123 }
4124
4125 /* See target.h. */
4126
4127 void
4128 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4129 {
4130 struct target_ops *t;
4131
4132 for (t = current_target.beneath; t != NULL; t = t->beneath)
4133 if (t->to_insn_history_range != NULL)
4134 {
4135 t->to_insn_history_range (t, begin, end, flags);
4136 return;
4137 }
4138
4139 tcomplain ();
4140 }
4141
4142 /* See target.h. */
4143
4144 void
4145 target_call_history (int size, int flags)
4146 {
4147 struct target_ops *t;
4148
4149 for (t = current_target.beneath; t != NULL; t = t->beneath)
4150 if (t->to_call_history != NULL)
4151 {
4152 t->to_call_history (t, size, flags);
4153 return;
4154 }
4155
4156 tcomplain ();
4157 }
4158
4159 /* See target.h. */
4160
4161 void
4162 target_call_history_from (ULONGEST begin, int size, int flags)
4163 {
4164 struct target_ops *t;
4165
4166 for (t = current_target.beneath; t != NULL; t = t->beneath)
4167 if (t->to_call_history_from != NULL)
4168 {
4169 t->to_call_history_from (t, begin, size, flags);
4170 return;
4171 }
4172
4173 tcomplain ();
4174 }
4175
4176 /* See target.h. */
4177
4178 void
4179 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4180 {
4181 struct target_ops *t;
4182
4183 for (t = current_target.beneath; t != NULL; t = t->beneath)
4184 if (t->to_call_history_range != NULL)
4185 {
4186 t->to_call_history_range (t, begin, end, flags);
4187 return;
4188 }
4189
4190 tcomplain ();
4191 }
4192
/* Debug wrapper: forward to the real to_prepare_to_store, then log.  */
static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
4200
4201 /* See target.h. */
4202
const struct frame_unwind *
target_get_unwinder (void)
{
  struct target_ops *t;

  /* NOTE(review): to_get_unwinder is returned directly, not invoked;
     in this snapshot it appears to be a data member holding the
     unwinder, not a method -- confirm against target.h before
     converting to a call.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_get_unwinder != NULL)
      return t->to_get_unwinder;

  return NULL;
}
4214
4215 /* See target.h. */
4216
const struct frame_unwind *
target_get_tailcall_unwinder (void)
{
  struct target_ops *t;

  /* NOTE(review): to_get_tailcall_unwinder is returned directly, not
     invoked; it appears to be a data member here -- confirm against
     target.h before converting to a call.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_get_tailcall_unwinder != NULL)
      return t->to_get_tailcall_unwinder;

  return NULL;
}
4228
4229 /* See target.h. */
4230
4231 CORE_ADDR
4232 forward_target_decr_pc_after_break (struct target_ops *ops,
4233 struct gdbarch *gdbarch)
4234 {
4235 for (; ops != NULL; ops = ops->beneath)
4236 if (ops->to_decr_pc_after_break != NULL)
4237 return ops->to_decr_pc_after_break (ops, gdbarch);
4238
4239 return gdbarch_decr_pc_after_break (gdbarch);
4240 }
4241
4242 /* See target.h. */
4243
/* Convenience wrapper starting the search at the top of the current
   target stack.  */
CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
4249
/* Debug wrapper around the deprecated memory-transfer method: forward
   the request, then log the call and (for transfers) a hex dump of
   the bytes involved.  */
static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Start a new output line whenever the buffer address is
	     16-byte aligned.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* Unless verbose debugging (level >= 2) is on, elide
		 everything past the first line of the dump.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4290
/* Debug wrapper: forward to the real to_files_info, then log.  */
static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4298
4299 static int
4300 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4301 struct bp_target_info *bp_tgt)
4302 {
4303 int retval;
4304
4305 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4306
4307 fprintf_unfiltered (gdb_stdlog,
4308 "target_insert_breakpoint (%s, xxx) = %ld\n",
4309 core_addr_to_string (bp_tgt->placed_address),
4310 (unsigned long) retval);
4311 return retval;
4312 }
4313
4314 static int
4315 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4316 struct bp_target_info *bp_tgt)
4317 {
4318 int retval;
4319
4320 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4321
4322 fprintf_unfiltered (gdb_stdlog,
4323 "target_remove_breakpoint (%s, xxx) = %ld\n",
4324 core_addr_to_string (bp_tgt->placed_address),
4325 (unsigned long) retval);
4326 return retval;
4327 }
4328
/* Debug wrapper: query hardware-breakpoint availability via the
   wrapped target and log arguments and result.  */
static int
debug_to_can_use_hw_breakpoint (struct target_ops *self,
				int type, int cnt, int from_tty)
{
  int retval;

  retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
						  type, cnt, from_tty);

  /* NOTE(review): the int arguments are widened to unsigned long but
     printed with %ld; harmless for the small non-negative values seen
     here, though the cast/format pair is mismatched.  */
  fprintf_unfiltered (gdb_stdlog,
		      "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
		      (unsigned long) type,
		      (unsigned long) cnt,
		      (unsigned long) from_tty,
		      (unsigned long) retval);
  return retval;
}
4346
/* Debug wrapper: query whether [ADDR, ADDR+LEN) can be covered by a
   hardware watchpoint and log the result.  */
static int
debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
				      CORE_ADDR addr, int len)
{
  /* NOTE(review): RETVAL is declared CORE_ADDR although the method
     and this function both return int; the value is also printed with
     core_addr_to_string.  Looks historical -- verify before changing
     the declaration, as it would alter the logged text.  */
  CORE_ADDR retval;

  retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
							addr, len);

  fprintf_unfiltered (gdb_stdlog,
		      "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
		      core_addr_to_string (addr), (unsigned long) len,
		      core_addr_to_string (retval));
  return retval;
}
4362
4363 static int
4364 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4365 CORE_ADDR addr, int len, int rw,
4366 struct expression *cond)
4367 {
4368 int retval;
4369
4370 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4371 addr, len,
4372 rw, cond);
4373
4374 fprintf_unfiltered (gdb_stdlog,
4375 "target_can_accel_watchpoint_condition "
4376 "(%s, %d, %d, %s) = %ld\n",
4377 core_addr_to_string (addr), len, rw,
4378 host_address_to_string (cond), (unsigned long) retval);
4379 return retval;
4380 }
4381
4382 static int
4383 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4384 {
4385 int retval;
4386
4387 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4388
4389 fprintf_unfiltered (gdb_stdlog,
4390 "target_stopped_by_watchpoint () = %ld\n",
4391 (unsigned long) retval);
4392 return retval;
4393 }
4394
4395 static int
4396 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4397 {
4398 int retval;
4399
4400 retval = debug_target.to_stopped_data_address (target, addr);
4401
4402 fprintf_unfiltered (gdb_stdlog,
4403 "target_stopped_data_address ([%s]) = %ld\n",
4404 core_addr_to_string (*addr),
4405 (unsigned long)retval);
4406 return retval;
4407 }
4408
4409 static int
4410 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4411 CORE_ADDR addr,
4412 CORE_ADDR start, int length)
4413 {
4414 int retval;
4415
4416 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4417 start, length);
4418
4419 fprintf_filtered (gdb_stdlog,
4420 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4421 core_addr_to_string (addr), core_addr_to_string (start),
4422 length, retval);
4423 return retval;
4424 }
4425
4426 static int
4427 debug_to_insert_hw_breakpoint (struct target_ops *self,
4428 struct gdbarch *gdbarch,
4429 struct bp_target_info *bp_tgt)
4430 {
4431 int retval;
4432
4433 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4434 gdbarch, bp_tgt);
4435
4436 fprintf_unfiltered (gdb_stdlog,
4437 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4438 core_addr_to_string (bp_tgt->placed_address),
4439 (unsigned long) retval);
4440 return retval;
4441 }
4442
4443 static int
4444 debug_to_remove_hw_breakpoint (struct target_ops *self,
4445 struct gdbarch *gdbarch,
4446 struct bp_target_info *bp_tgt)
4447 {
4448 int retval;
4449
4450 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4451 gdbarch, bp_tgt);
4452
4453 fprintf_unfiltered (gdb_stdlog,
4454 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4455 core_addr_to_string (bp_tgt->placed_address),
4456 (unsigned long) retval);
4457 return retval;
4458 }
4459
4460 static int
4461 debug_to_insert_watchpoint (struct target_ops *self,
4462 CORE_ADDR addr, int len, int type,
4463 struct expression *cond)
4464 {
4465 int retval;
4466
4467 retval = debug_target.to_insert_watchpoint (&debug_target,
4468 addr, len, type, cond);
4469
4470 fprintf_unfiltered (gdb_stdlog,
4471 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4472 core_addr_to_string (addr), len, type,
4473 host_address_to_string (cond), (unsigned long) retval);
4474 return retval;
4475 }
4476
4477 static int
4478 debug_to_remove_watchpoint (struct target_ops *self,
4479 CORE_ADDR addr, int len, int type,
4480 struct expression *cond)
4481 {
4482 int retval;
4483
4484 retval = debug_target.to_remove_watchpoint (&debug_target,
4485 addr, len, type, cond);
4486
4487 fprintf_unfiltered (gdb_stdlog,
4488 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4489 core_addr_to_string (addr), len, type,
4490 host_address_to_string (cond), (unsigned long) retval);
4491 return retval;
4492 }
4493
/* Debug-trace wrapper: forward to the real target vector saved in
   debug_target by setup_target_debug, then log to gdb_stdlog.  */

static void
debug_to_terminal_init (struct target_ops *self)
{
  debug_target.to_terminal_init (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
4501
/* Debug-trace wrapper: forward to the real target vector saved in
   debug_target by setup_target_debug, then log to gdb_stdlog.  */

static void
debug_to_terminal_inferior (struct target_ops *self)
{
  debug_target.to_terminal_inferior (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
4509
/* Debug-trace wrapper: forward to the real target vector saved in
   debug_target by setup_target_debug, then log to gdb_stdlog.  */

static void
debug_to_terminal_ours_for_output (struct target_ops *self)
{
  debug_target.to_terminal_ours_for_output (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
4517
/* Debug-trace wrapper: forward to the real target vector saved in
   debug_target by setup_target_debug, then log to gdb_stdlog.  */

static void
debug_to_terminal_ours (struct target_ops *self)
{
  debug_target.to_terminal_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
4525
/* Debug-trace wrapper: forward to the real target vector saved in
   debug_target by setup_target_debug, then log to gdb_stdlog.  */

static void
debug_to_terminal_save_ours (struct target_ops *self)
{
  debug_target.to_terminal_save_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
4533
/* Debug-trace wrapper: forward to the real target vector saved in
   debug_target, then log the arguments to gdb_stdlog.
   NOTE(review): ARG is printed with %s; assumes the underlying method
   tolerates whatever ARG the caller passed — confirm it is never NULL
   here.  */

static void
debug_to_terminal_info (struct target_ops *self,
			const char *arg, int from_tty)
{
  debug_target.to_terminal_info (&debug_target, arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
4543
/* Debug-trace wrapper: forward the "load" request to the real target
   vector saved in debug_target, then log the arguments to gdb_stdlog.  */

static void
debug_to_load (struct target_ops *self, char *args, int from_tty)
{
  debug_target.to_load (&debug_target, args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4551
/* Debug-trace wrapper: forward to the real target vector saved in
   debug_target, then log the new inferior's pid to gdb_stdlog.  */

static void
debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_post_startup_inferior (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      ptid_get_pid (ptid));
}
4560
4561 static int
4562 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4563 {
4564 int retval;
4565
4566 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4567
4568 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4569 pid, retval);
4570
4571 return retval;
4572 }
4573
4574 static int
4575 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4576 {
4577 int retval;
4578
4579 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4580
4581 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4582 pid, retval);
4583
4584 return retval;
4585 }
4586
4587 static int
4588 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4589 {
4590 int retval;
4591
4592 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4593
4594 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4595 pid, retval);
4596
4597 return retval;
4598 }
4599
4600 static int
4601 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4602 {
4603 int retval;
4604
4605 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4606
4607 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4608 pid, retval);
4609
4610 return retval;
4611 }
4612
4613 static int
4614 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4615 {
4616 int retval;
4617
4618 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4619
4620 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4621 pid, retval);
4622
4623 return retval;
4624 }
4625
4626 static int
4627 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4628 {
4629 int retval;
4630
4631 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4632
4633 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4634 pid, retval);
4635
4636 return retval;
4637 }
4638
4639 static int
4640 debug_to_has_exited (struct target_ops *self,
4641 int pid, int wait_status, int *exit_status)
4642 {
4643 int has_exited;
4644
4645 has_exited = debug_target.to_has_exited (&debug_target,
4646 pid, wait_status, exit_status);
4647
4648 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4649 pid, wait_status, *exit_status, has_exited);
4650
4651 return has_exited;
4652 }
4653
4654 static int
4655 debug_to_can_run (struct target_ops *self)
4656 {
4657 int retval;
4658
4659 retval = debug_target.to_can_run (&debug_target);
4660
4661 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4662
4663 return retval;
4664 }
4665
4666 static struct gdbarch *
4667 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4668 {
4669 struct gdbarch *retval;
4670
4671 retval = debug_target.to_thread_architecture (ops, ptid);
4672
4673 fprintf_unfiltered (gdb_stdlog,
4674 "target_thread_architecture (%s) = %s [%s]\n",
4675 target_pid_to_str (ptid),
4676 host_address_to_string (retval),
4677 gdbarch_bfd_arch_info (retval)->printable_name);
4678 return retval;
4679 }
4680
/* Debug-trace wrapper: forward the stop request to the real target
   vector saved in debug_target, then log the ptid to gdb_stdlog.  */

static void
debug_to_stop (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_stop (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
4689
/* Debug-trace wrapper: forward the monitor command to the real target
   vector saved in debug_target, then log the command string.  Note the
   log line is emitted after the command runs.  */

static void
debug_to_rcmd (struct target_ops *self, char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (&debug_target, command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
4697
4698 static char *
4699 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4700 {
4701 char *exec_file;
4702
4703 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4704
4705 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4706 pid, exec_file);
4707
4708 return exec_file;
4709 }
4710
/* Install the debug-trace wrappers: save a copy of the current target
   vector in debug_target, then overwrite selected current_target
   methods with the debug_to_* wrappers above, which delegate to the
   saved copy and log each call to gdb_stdlog.  Called when target
   debugging is enabled ("set debug target").  */

static void
setup_target_debug (void)
{
  /* Keep the real vector so the wrappers have something to forward to.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4758 \f
4759
/* Help text shared by the "info target" and "info files" commands
   registered in initialize_targets below.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4764
/* Default implementation of to_rcmd: targets with no remote monitor
   simply report that "monitor" is unsupported.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4770
/* Implement the "monitor" command: pass CMD to the target's rcmd
   method, with output directed to gdb_stdtarg.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
4777
4778 /* Print the name of each layers of our target stack. */
4779
4780 static void
4781 maintenance_print_target_stack (char *cmd, int from_tty)
4782 {
4783 struct target_ops *t;
4784
4785 printf_filtered (_("The current target stack is:\n"));
4786
4787 for (t = target_stack; t != NULL; t = t->beneath)
4788 {
4789 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4790 }
4791 }
4792
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* Staging variable for "set target-async": the set command writes to
   this variable.  If the inferior is executing, target_async_permitted
   is *not* updated (see set_target_async_command below).  */
static int target_async_permitted_1 = 0;
4799
/* The "set target-async" command hook.  Refuse the change while any
   inferior is live — reverting the staged value so "show" stays
   accurate — otherwise publish the staged value.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the user's edit of the staging variable before erroring
	 out, so the setting still reflects reality.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
4812
/* The "show target-async" command hook: print the current value.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4822
/* Temporary copies of permission settings.  The "set may-*" commands
   write here; set_target_permissions / set_write_memory_permission
   copy them into the real may_* flags.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4831
/* Make the user-set values match the real values again, e.g. after a
   rejected "set" while the inferior was running.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4844
4845 /* The one function handles (most of) the permission flags in the same
4846 way. */
4847
/* The one function handles (most of) the permission flags in the same
   way: reject changes while the inferior has execution, otherwise copy
   the staged *_1 values into the live flags.  may_write_memory is
   deliberately not handled here — it has its own setter,
   set_write_memory_permission, which works independently of observer
   mode (see below).  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the staged values so "show" still matches reality.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
4866
4867 /* Set memory write permission independently of observer mode. */
4868
/* Set memory write permission independently of observer mode.  Unlike
   set_target_permissions, this does not reject changes while the
   inferior is running.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
4877
4878
/* Module initialization: push the dummy target as the bottom of the
   target stack and register the target-related commands and
   set/show variables defined in this file.  */

void
initialize_targets (void)
{
  /* The dummy target sits at the bottom of the stack so there is
     always a current target.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are aliases for the same thing.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose.  Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The "may-*" permission flags below stage their values in the *_1
     variables; the set hooks copy them into the live flags.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}