convert to_get_section_table
[binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
59
60 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
61 long lwp, long tid);
62
63 static int default_follow_fork (struct target_ops *self, int follow_child,
64 int detach_fork);
65
66 static void default_mourn_inferior (struct target_ops *self);
67
68 static void tcomplain (void) ATTRIBUTE_NORETURN;
69
70 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
71
72 static int return_zero (void);
73
74 void target_ignore (void);
75
76 static void target_command (char *, int);
77
78 static struct target_ops *find_default_run_target (char *);
79
80 static target_xfer_partial_ftype default_xfer_partial;
81
82 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
83 ptid_t ptid);
84
85 static int dummy_find_memory_regions (struct target_ops *self,
86 find_memory_region_ftype ignore1,
87 void *ignore2);
88
89 static char *dummy_make_corefile_notes (struct target_ops *self,
90 bfd *ignore1, int *ignore2);
91
92 static char *default_pid_to_str (struct target_ops *ops, ptid_t ptid);
93
94 static int find_default_can_async_p (struct target_ops *ignore);
95
96 static int find_default_is_async_p (struct target_ops *ignore);
97
98 static enum exec_direction_kind default_execution_direction
99 (struct target_ops *self);
100
101 #include "target-delegates.c"
102
103 static void init_dummy_target (void);
104
105 static struct target_ops debug_target;
106
107 static void debug_to_open (char *, int);
108
109 static void debug_to_prepare_to_store (struct target_ops *self,
110 struct regcache *);
111
112 static void debug_to_files_info (struct target_ops *);
113
114 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
118 struct bp_target_info *);
119
120 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
121 int, int, int);
122
123 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
124 struct gdbarch *,
125 struct bp_target_info *);
126
127 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
128 struct gdbarch *,
129 struct bp_target_info *);
130
131 static int debug_to_insert_watchpoint (struct target_ops *self,
132 CORE_ADDR, int, int,
133 struct expression *);
134
135 static int debug_to_remove_watchpoint (struct target_ops *self,
136 CORE_ADDR, int, int,
137 struct expression *);
138
139 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
140
141 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
142 CORE_ADDR, CORE_ADDR, int);
143
144 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
145 CORE_ADDR, int);
146
147 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
148 CORE_ADDR, int, int,
149 struct expression *);
150
151 static void debug_to_terminal_init (struct target_ops *self);
152
153 static void debug_to_terminal_inferior (struct target_ops *self);
154
155 static void debug_to_terminal_ours_for_output (struct target_ops *self);
156
157 static void debug_to_terminal_save_ours (struct target_ops *self);
158
159 static void debug_to_terminal_ours (struct target_ops *self);
160
161 static void debug_to_load (struct target_ops *self, char *, int);
162
163 static int debug_to_can_run (struct target_ops *self);
164
165 static void debug_to_stop (struct target_ops *self, ptid_t);
166
167 /* Pointer to array of target architecture structures; the size of the
168 array; the current index into the array; the allocated size of the
169 array. */
170 struct target_ops **target_structs;
171 unsigned target_struct_size;
172 unsigned target_struct_allocsize;
173 #define DEFAULT_ALLOCSIZE 10
174
175 /* The initial current target, so that there is always a semi-valid
176 current target. */
177
178 static struct target_ops dummy_target;
179
180 /* Top of target stack. */
181
182 static struct target_ops *target_stack;
183
184 /* The target structure we are currently using to talk to a process
185 or file or whatever "inferior" we have. */
186
187 struct target_ops current_target;
188
189 /* Command list for target. */
190
191 static struct cmd_list_element *targetlist = NULL;
192
193 /* Nonzero if we should trust readonly sections from the
194 executable when reading memory. */
195
196 static int trust_readonly = 0;
197
198 /* Nonzero if we should show true memory content including
199 memory breakpoint inserted by gdb. */
200
201 static int show_memory_breakpoints = 0;
202
203 /* These globals control whether GDB attempts to perform these
204 operations; they are useful for targets that need to prevent
205 inadvertant disruption, such as in non-stop mode. */
206
207 int may_write_registers = 1;
208
209 int may_write_memory = 1;
210
211 int may_insert_breakpoints = 1;
212
213 int may_insert_tracepoints = 1;
214
215 int may_insert_fast_tracepoints = 1;
216
217 int may_stop = 1;
218
219 /* Non-zero if we want to see trace of target level stuff. */
220
221 static unsigned int targetdebug = 0;
/* Implement the "show debug target" callback: print the current value
   of the "set debug target" setting to FILE.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
228
229 static void setup_target_debug (void);
230
231 /* The user just typed 'target' without the name of a target. */
232
static void
target_command (char *arg, int from_tty)
{
  /* An argument (the target type) is required; print a usage hint.  */
  fputs_filtered ("Argument required (target name). Try `help target'\n",
		  gdb_stdout);
}
239
240 /* Default target_has_* methods for process_stratum targets. */
241
242 int
243 default_child_has_all_memory (struct target_ops *ops)
244 {
245 /* If no inferior selected, then we can't read memory here. */
246 if (ptid_equal (inferior_ptid, null_ptid))
247 return 0;
248
249 return 1;
250 }
251
252 int
253 default_child_has_memory (struct target_ops *ops)
254 {
255 /* If no inferior selected, then we can't read memory here. */
256 if (ptid_equal (inferior_ptid, null_ptid))
257 return 0;
258
259 return 1;
260 }
261
262 int
263 default_child_has_stack (struct target_ops *ops)
264 {
265 /* If no inferior selected, there's no stack. */
266 if (ptid_equal (inferior_ptid, null_ptid))
267 return 0;
268
269 return 1;
270 }
271
272 int
273 default_child_has_registers (struct target_ops *ops)
274 {
275 /* Can't read registers from no inferior. */
276 if (ptid_equal (inferior_ptid, null_ptid))
277 return 0;
278
279 return 1;
280 }
281
282 int
283 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
284 {
285 /* If there's no thread selected, then we can't make it run through
286 hoops. */
287 if (ptid_equal (the_ptid, null_ptid))
288 return 0;
289
290 return 1;
291 }
292
293
294 int
295 target_has_all_memory_1 (void)
296 {
297 struct target_ops *t;
298
299 for (t = current_target.beneath; t != NULL; t = t->beneath)
300 if (t->to_has_all_memory (t))
301 return 1;
302
303 return 0;
304 }
305
306 int
307 target_has_memory_1 (void)
308 {
309 struct target_ops *t;
310
311 for (t = current_target.beneath; t != NULL; t = t->beneath)
312 if (t->to_has_memory (t))
313 return 1;
314
315 return 0;
316 }
317
318 int
319 target_has_stack_1 (void)
320 {
321 struct target_ops *t;
322
323 for (t = current_target.beneath; t != NULL; t = t->beneath)
324 if (t->to_has_stack (t))
325 return 1;
326
327 return 0;
328 }
329
330 int
331 target_has_registers_1 (void)
332 {
333 struct target_ops *t;
334
335 for (t = current_target.beneath; t != NULL; t = t->beneath)
336 if (t->to_has_registers (t))
337 return 1;
338
339 return 0;
340 }
341
342 int
343 target_has_execution_1 (ptid_t the_ptid)
344 {
345 struct target_ops *t;
346
347 for (t = current_target.beneath; t != NULL; t = t->beneath)
348 if (t->to_has_execution (t, the_ptid))
349 return 1;
350
351 return 0;
352 }
353
/* Return nonzero if the currently selected inferior (inferior_ptid)
   has execution.  */

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
359
360 /* Complete initialization of T. This ensures that various fields in
361 T are set, if needed by the target implementation. */
362
void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* NOTE(review): return_zero is declared as int (*) (void); calling
     it through these differently-typed pointers is a long-standing
     GDB idiom, not strictly conforming C -- it relies on the ABI
     tolerating ignored arguments.  Do not "fix" locally; the casts
     must match return_zero's actual declaration.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill every remaining NULL method slot with a delegating stub.  */
  install_delegators (t);
}
387
388 /* Add possible target architecture T to the list and add a new
389 command 'target T->to_shortname'. Set COMPLETER as the command's
390 completer if not NULL. */
391
void
add_target_with_completer (struct target_ops *t,
			   completer_ftype *completer)
{
  struct cmd_list_element *c;

  complete_target_initialization (t);

  /* Grow the global target_structs array on demand, doubling its
     capacity whenever it fills up.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* Lazily create the "target" prefix command the first time any
     target is registered.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol. For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  /* Register "target <shortname>" as a sub-command, with T's open
     routine as its handler.  */
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
	       &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
428
429 /* Add a possible target architecture to the list. */
430
void
add_target (struct target_ops *t)
{
  /* Register T with no command-line completer.  */
  add_target_with_completer (t, NULL);
}
436
437 /* See target.h. */
438
void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  /* ALT is intentionally not freed here -- presumably deprecate_cmd
     retains the string for the command's lifetime; verify against
     deprecate_cmd's contract before changing this.  */
  alt = xstrprintf ("target %s", t->to_shortname);
  deprecate_cmd (c, alt);
}
451
452 /* Stub functions */
453
/* A do-nothing stub, installed where a target method slot must be
   non-NULL but no action is required.  */

void
target_ignore (void)
{
}
458
/* Kill the inferior: log the call when target debugging is enabled,
   then delegate to the current target's to_kill method.  */

void
target_kill (void)
{
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");

  current_target.to_kill (&current_target);
}
467
/* Load a program into the target.  Invalidate the target dcache
   first, so stale memory contents are not read back afterwards.  */

void
target_load (char *arg, int from_tty)
{
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
474
/* Start a new inferior running EXEC_FILE with arguments ARGS and
   environment ENV.  Delegates to the topmost target beneath
   current_target that implements to_create_inferior; it is an
   internal error if none does.  */

void
target_create_inferior (char *exec_file, char *args,
			char **env, int from_tty)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_create_inferior != NULL)
	{
	  t->to_create_inferior (t, exec_file, args, env, from_tty);
	  /* Log after the call so the trace reflects what actually ran.  */
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_create_inferior (%s, %s, xxx, %d)\n",
				exec_file, args, from_tty);
	  return;
	}
    }

  internal_error (__FILE__, __LINE__,
		  _("could not find a target to create inferior"));
}
497
/* Give the inferior ownership of the terminal for a foreground
   resume; background (async) resumes leave GDB in control.  */

void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) (&current_target);
}
512
/* Default memory-transfer method that always fails: set errno to EIO
   and report zero bytes handled.  Installed as the
   deprecated_xfer_memory fallback by update_current_target.  */

static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
520
/* Report that the current target cannot perform the requested
   operation.  Does not return (ATTRIBUTE_NORETURN).  */

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
527
/* Report that the requested operation needs a live process.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
533
/* Default to_terminal_info method: report that no terminal state has
   been saved.  ARGS and FROM_TTY are ignored.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
539
540 /* A default implementation for the to_get_ada_task_ptid target method.
541
542 This function builds the PTID by using both LWP and TID as part of
543 the PTID lwp and tid elements. The pid used is the pid of the
544 inferior_ptid. */
545
static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  /* Reuse the pid of the currently selected inferior; only the lwp
     and tid components come from the Ada task.  */
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
551
552 static enum exec_direction_kind
553 default_execution_direction (struct target_ops *self)
554 {
555 if (!target_can_execute_reverse)
556 return EXEC_FORWARD;
557 else if (!target_can_async_p ())
558 return EXEC_FORWARD;
559 else
560 gdb_assert_not_reached ("\
561 to_execution_direction must be implemented for reverse async");
562 }
563
564 /* Go through the target stack from top to bottom, copying over zero
565 entries in current_target, then filling in still empty entries. In
566 effect, we are doing class inheritance through the pushed target
567 vectors.
568
569 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
570 is currently implemented, is that it discards any knowledge of
571 which target an inherited method originally belonged to.
572 Consequently, new target methods should instead explicitly and
573 locally search the target stack for the target that can handle the
574 request. */
575
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy FIELD from TARGET into current_target, but only if no target
     higher on the stack already supplied a value.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  /* Walk the stack top-to-bottom; fields marked "Do not inherit" are
     handled by the delegators installed above instead.  */
  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      /* Do not inherit to_files_info.  */
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      /* Do not inherit to_can_use_hw_breakpoint.  */
      /* Do not inherit to_insert_hw_breakpoint.  */
      /* Do not inherit to_remove_hw_breakpoint.  */
      /* Do not inherit to_ranged_break_num_registers.  */
      /* Do not inherit to_insert_watchpoint.  */
      /* Do not inherit to_remove_watchpoint.  */
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      /* Do not inherit to_watchpoint_addr_within_range.  */
      /* Do not inherit to_region_ok_for_hw_watchpoint.  */
      /* Do not inherit to_can_accel_watchpoint_condition.  */
      /* Do not inherit to_masked_watch_num_registers.  */
      /* Do not inherit to_terminal_init.  */
      /* Do not inherit to_terminal_inferior.  */
      /* Do not inherit to_terminal_ours_for_output.  */
      /* Do not inherit to_terminal_ours.  */
      /* Do not inherit to_terminal_save_ours.  */
      /* Do not inherit to_terminal_info.  */
      /* Do not inherit to_kill.  */
      /* Do not inherit to_load.  */
      /* Do not inherit to_create_inferior.  */
      /* Do not inherit to_post_startup_inferior.  */
      /* Do not inherit to_insert_fork_catchpoint.  */
      /* Do not inherit to_remove_fork_catchpoint.  */
      /* Do not inherit to_insert_vfork_catchpoint.  */
      /* Do not inherit to_remove_vfork_catchpoint.  */
      /* Do not inherit to_follow_fork.  */
      /* Do not inherit to_insert_exec_catchpoint.  */
      /* Do not inherit to_remove_exec_catchpoint.  */
      /* Do not inherit to_set_syscall_catchpoint.  */
      /* Do not inherit to_has_exited.  */
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      /* Do not inherit to_extra_thread_info.  */
      /* Do not inherit to_thread_name.  */
      /* Do not inherit to_stop.  */
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      /* Do not inherit to_pid_to_exec_file.  */
      /* Do not inherit to_log_command.  */
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      /* Do not inherit to_find_memory_regions.  */
      /* Do not inherit to_make_corefile_notes.  */
      /* Do not inherit to_get_bookmark.  */
      /* Do not inherit to_goto_bookmark.  */
      /* Do not inherit to_get_thread_local_address.  */
      /* Do not inherit to_can_execute_reverse.  */
      /* Do not inherit to_execution_direction.  */
      /* Do not inherit to_thread_architecture.  */
      /* Do not inherit to_read_description.  */
      /* Do not inherit to_get_ada_task_ptid.  */
      /* Do not inherit to_search_memory.  */
      /* Do not inherit to_supports_multi_process.  */
      /* Do not inherit to_supports_enable_disable_tracepoint.  */
      /* Do not inherit to_supports_string_tracing.  */
      /* Do not inherit to_trace_init.  */
      /* Do not inherit to_download_tracepoint.  */
      /* Do not inherit to_can_download_tracepoint.  */
      /* Do not inherit to_download_trace_state_variable.  */
      /* Do not inherit to_enable_tracepoint.  */
      /* Do not inherit to_disable_tracepoint.  */
      /* Do not inherit to_trace_set_readonly_regions.  */
      /* Do not inherit to_trace_start.  */
      /* Do not inherit to_get_trace_status.  */
      /* Do not inherit to_get_tracepoint_status.  */
      /* Do not inherit to_trace_stop.  */
      /* Do not inherit to_trace_find.  */
      /* Do not inherit to_get_trace_state_variable_value.  */
      /* Do not inherit to_save_trace_data.  */
      /* Do not inherit to_upload_tracepoints.  */
      /* Do not inherit to_upload_trace_state_variables.  */
      /* Do not inherit to_get_raw_trace_data.  */
      /* Do not inherit to_get_min_fast_tracepoint_insn_len.  */
      /* Do not inherit to_set_disconnected_tracing.  */
      /* Do not inherit to_set_circular_trace_buffer.  */
      /* Do not inherit to_set_trace_buffer_size.  */
      /* Do not inherit to_set_trace_notes.  */
      /* Do not inherit to_get_tib_address.  */
      /* Do not inherit to_set_permissions.  */
      /* Do not inherit to_static_tracepoint_marker_at.  */
      /* Do not inherit to_static_tracepoint_markers_by_strid.  */
      /* Do not inherit to_traceframe_info.  */
      /* Do not inherit to_use_agent.  */
      /* Do not inherit to_can_use_agent.  */
      /* Do not inherit to_augmented_libraries_svr4_read.  */
      INHERIT (to_magic, t);
      /* Do not inherit
	 to_supports_evaluation_of_breakpoint_conditions.  */
      /* Do not inherit to_can_run_breakpoint_commands.  */
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
  if (!current_target.field)               \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  current_target.to_read_description = NULL;

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
758
759 /* Push a new target type into the stack of the existing target accessors,
760 possibly superseding some of the existing accessors.
761
762 Rather than allow an empty stack, we always have the dummy target at
763 the bottom stratum, so we can call the function vectors without
764 checking them. */
765
void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  CUR ends up
     pointing at the link where T must be spliced in (the stack is
     ordered by descending stratum).  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
	 and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Re-derive current_target from the modified stack.  */
  update_current_target ();
}
809
810 /* Remove a target_ops vector from the stack, wherever it may be.
811 Return how many times it was removed (0 or 1). */
812
int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target is the permanent bottom of the stack; it may
     never be removed.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
851
/* Unpush (and close) every target whose stratum is strictly above
   ABOVE_STRATUM.  */

void
pop_all_targets_above (enum strata above_stratum)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      if (!unpush_target (target_stack))
	{
	  /* unpush_target only fails when the top target is not on
	     the stack at all, which would mean the stack is corrupt.  */
	  fprintf_unfiltered (gdb_stderr,
			      "pop_all_targets couldn't find target %s\n",
			      target_stack->to_shortname);
	  internal_error (__FILE__, __LINE__,
			  _("failed internal consistency check"));
	  break;
	}
    }
}
868
/* Unpush every target above the permanent dummy target.  */

void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
874
875 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
876
877 int
878 target_is_pushed (struct target_ops *t)
879 {
880 struct target_ops **cur;
881
882 /* Check magic number. If wrong, it probably means someone changed
883 the struct definition, but not all the places that initialize one. */
884 if (t->to_magic != OPS_MAGIC)
885 {
886 fprintf_unfiltered (gdb_stderr,
887 "Magic number of %s target struct wrong\n",
888 t->to_shortname);
889 internal_error (__FILE__, __LINE__,
890 _("failed internal consistency check"));
891 }
892
893 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
894 if (*cur == t)
895 return 1;
896
897 return 0;
898 }
899
900 /* Using the objfile specified in OBJFILE, find the address for the
901 current thread's thread-local storage with offset OFFSET. */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile so the value assigned inside TRY_CATCH survives its
     longjmp-based unwinding.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the topmost target layer that can resolve thread-local
     addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
996
997 const char *
998 target_xfer_status_to_string (enum target_xfer_status err)
999 {
1000 #define CASE(X) case X: return #X
1001 switch (err)
1002 {
1003 CASE(TARGET_XFER_E_IO);
1004 CASE(TARGET_XFER_E_UNAVAILABLE);
1005 default:
1006 return "<unknown>";
1007 }
1008 #undef CASE
1009 };
1010
1011
1012 #undef MIN
1013 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1014
1015 /* target_read_string -- read a null terminated string, up to LEN bytes,
1016 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1017 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1018 is responsible for freeing it. Return the number of bytes successfully
1019 read. */
1020
int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read an aligned 4-byte chunk containing MEMADDR; TLEN is how
	 many of those bytes belong to the string, OFFSET where they
	 start within the chunk.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the result buffer (doubling) if the next chunk would
	 overflow it.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes into the result, stopping at (and counting) a NUL
	 terminator.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
done:
  /* Hand ownership of BUFFER to the caller, even on error; the read
     count and error code report how far we got.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1087
1088 struct target_section_table *
1089 target_get_section_table (struct target_ops *target)
1090 {
1091 if (targetdebug)
1092 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1093
1094 return (*target->to_get_section_table) (target);
1095 }
1096
1097 /* Find a section containing ADDR. */
1098
1099 struct target_section *
1100 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1101 {
1102 struct target_section_table *table = target_get_section_table (target);
1103 struct target_section *secp;
1104
1105 if (table == NULL)
1106 return NULL;
1107
1108 for (secp = table->sections; secp < table->sections_end; secp++)
1109 {
1110 if (addr >= secp->addr && addr < secp->endaddr)
1111 return secp;
1112 }
1113 return NULL;
1114 }
1115
1116 /* Read memory from the live target, even if currently inspecting a
1117 traceframe. The return is the same as that of target_read. */
1118
1119 static enum target_xfer_status
1120 target_read_live_memory (enum target_object object,
1121 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1122 ULONGEST *xfered_len)
1123 {
1124 enum target_xfer_status ret;
1125 struct cleanup *cleanup;
1126
1127 /* Switch momentarily out of tfind mode so to access live memory.
1128 Note that this must not clear global state, such as the frame
1129 cache, which must still remain valid for the previous traceframe.
1130 We may be _building_ the frame cache at this point. */
1131 cleanup = make_cleanup_restore_traceframe_number ();
1132 set_traceframe_number (-1);
1133
1134 ret = target_xfer_partial (current_target.beneath, object, NULL,
1135 myaddr, NULL, memaddr, len, xfered_len);
1136
1137 do_cleanups (cleanup);
1138 return ret;
1139 }
1140
1141 /* Using the set of read-only target sections of OPS, read live
1142 read-only memory. Note that the actual reads start from the
1143 top-most target again.
1144
1145 For interface/parameters/return description see target.h,
1146 to_xfer_partial. */
1147
1148 static enum target_xfer_status
1149 memory_xfer_live_readonly_partial (struct target_ops *ops,
1150 enum target_object object,
1151 gdb_byte *readbuf, ULONGEST memaddr,
1152 ULONGEST len, ULONGEST *xfered_len)
1153 {
1154 struct target_section *secp;
1155 struct target_section_table *table;
1156
1157 secp = target_section_by_addr (ops, memaddr);
1158 if (secp != NULL
1159 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1160 secp->the_bfd_section)
1161 & SEC_READONLY))
1162 {
1163 struct target_section *p;
1164 ULONGEST memend = memaddr + len;
1165
1166 table = target_get_section_table (ops);
1167
1168 for (p = table->sections; p < table->sections_end; p++)
1169 {
1170 if (memaddr >= p->addr)
1171 {
1172 if (memend <= p->endaddr)
1173 {
1174 /* Entire transfer is within this section. */
1175 return target_read_live_memory (object, memaddr,
1176 readbuf, len, xfered_len);
1177 }
1178 else if (memaddr >= p->endaddr)
1179 {
1180 /* This section ends before the transfer starts. */
1181 continue;
1182 }
1183 else
1184 {
1185 /* This section overlaps the transfer. Just do half. */
1186 len = p->endaddr - memaddr;
1187 return target_read_live_memory (object, memaddr,
1188 readbuf, len, xfered_len);
1189 }
1190 }
1191 }
1192 }
1193
1194 return TARGET_XFER_EOF;
1195 }
1196
1197 /* Read memory from more than one valid target. A core file, for
1198 instance, could have some of memory but delegate other bits to
1199 the target below it. So, we must manually try all targets. */
1200
1201 static enum target_xfer_status
1202 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1203 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1204 ULONGEST *xfered_len)
1205 {
1206 enum target_xfer_status res;
1207
1208 do
1209 {
1210 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1211 readbuf, writebuf, memaddr, len,
1212 xfered_len);
1213 if (res == TARGET_XFER_OK)
1214 break;
1215
1216 /* Stop if the target reports that the memory is not available. */
1217 if (res == TARGET_XFER_E_UNAVAILABLE)
1218 break;
1219
1220 /* We want to continue past core files to executables, but not
1221 past a running target's memory. */
1222 if (ops->to_has_all_memory (ops))
1223 break;
1224
1225 ops = ops->beneath;
1226 }
1227 while (ops != NULL);
1228
1229 return res;
1230 }
1231
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   Tries, in order: unmapped-overlay file reads, trusted read-only
   sections, traceframe-aware live reads, the dcache, and finally a
   raw transfer through the target stack.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  /* NOTE(review): REG_LEN is an int but is assigned from the ULONGEST
     LEN; very large transfers would truncate here — confirm whether a
     wider type is warranted.  */
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  /* Redirect the access to the overlay's mapped address.  */
	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Clip the read to end where the traceframe's
		     available memory begins.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1442
1443 /* Perform a partial memory transfer. For docs see target.h,
1444 to_xfer_partial. */
1445
1446 static enum target_xfer_status
1447 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1448 gdb_byte *readbuf, const gdb_byte *writebuf,
1449 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1450 {
1451 enum target_xfer_status res;
1452
1453 /* Zero length requests are ok and require no work. */
1454 if (len == 0)
1455 return TARGET_XFER_EOF;
1456
1457 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1458 breakpoint insns, thus hiding out from higher layers whether
1459 there are software breakpoints inserted in the code stream. */
1460 if (readbuf != NULL)
1461 {
1462 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1463 xfered_len);
1464
1465 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1466 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1467 }
1468 else
1469 {
1470 void *buf;
1471 struct cleanup *old_chain;
1472
1473 /* A large write request is likely to be partially satisfied
1474 by memory_xfer_partial_1. We will continually malloc
1475 and free a copy of the entire write request for breakpoint
1476 shadow handling even though we only end up writing a small
1477 subset of it. Cap writes to 4KB to mitigate this. */
1478 len = min (4096, len);
1479
1480 buf = xmalloc (len);
1481 old_chain = make_cleanup (xfree, buf);
1482 memcpy (buf, writebuf, len);
1483
1484 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1485 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1486 xfered_len);
1487
1488 do_cleanups (old_chain);
1489 }
1490
1491 return res;
1492 }
1493
1494 static void
1495 restore_show_memory_breakpoints (void *arg)
1496 {
1497 show_memory_breakpoints = (uintptr_t) arg;
1498 }
1499
1500 struct cleanup *
1501 make_show_memory_breakpoints_cleanup (int show)
1502 {
1503 int current = show_memory_breakpoints;
1504
1505 show_memory_breakpoints = show;
1506 return make_cleanup (restore_show_memory_breakpoints,
1507 (void *) (uintptr_t) current);
1508 }
1509
/* For docs see target.h, to_xfer_partial.

   Top-level entry point for partial transfers: routes memory objects
   through the memory-specific paths, everything else straight to the
   target's to_xfer_partial, and optionally dumps the transfer when
   target debugging is enabled.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  /* NOTE(review): I is an int compared against the ULONGEST
	     *XFERED_LEN; fine for debug-sized transfers but would wrap
	     for enormous ones — confirm if that matters here.  */
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      /* Start a new output line every 16 bytes; with low
		 verbosity, elide everything past the first line.  */
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1602
1603 /* Read LEN bytes of target memory at address MEMADDR, placing the
1604 results in GDB's memory at MYADDR. Returns either 0 for success or
1605 TARGET_XFER_E_IO if any error occurs.
1606
1607 If an error occurs, no guarantee is made about the contents of the data at
1608 MYADDR. In particular, the caller should not depend upon partial reads
1609 filling the buffer with good data. There is no way for the caller to know
1610 how much good data might have been transfered anyway. Callers that can
1611 deal with partial reads should call target_read (which will retry until
1612 it makes no progress, and then return how much was transferred). */
1613
1614 int
1615 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1616 {
1617 /* Dispatch to the topmost target, not the flattened current_target.
1618 Memory accesses check target->to_has_(all_)memory, and the
1619 flattened target doesn't inherit those. */
1620 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1621 myaddr, memaddr, len) == len)
1622 return 0;
1623 else
1624 return TARGET_XFER_E_IO;
1625 }
1626
1627 /* Like target_read_memory, but specify explicitly that this is a read
1628 from the target's raw memory. That is, this read bypasses the
1629 dcache, breakpoint shadowing, etc. */
1630
1631 int
1632 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1633 {
1634 /* See comment in target_read_memory about why the request starts at
1635 current_target.beneath. */
1636 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1637 myaddr, memaddr, len) == len)
1638 return 0;
1639 else
1640 return TARGET_XFER_E_IO;
1641 }
1642
1643 /* Like target_read_memory, but specify explicitly that this is a read from
1644 the target's stack. This may trigger different cache behavior. */
1645
1646 int
1647 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1648 {
1649 /* See comment in target_read_memory about why the request starts at
1650 current_target.beneath. */
1651 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1652 myaddr, memaddr, len) == len)
1653 return 0;
1654 else
1655 return TARGET_XFER_E_IO;
1656 }
1657
1658 /* Like target_read_memory, but specify explicitly that this is a read from
1659 the target's code. This may trigger different cache behavior. */
1660
1661 int
1662 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1663 {
1664 /* See comment in target_read_memory about why the request starts at
1665 current_target.beneath. */
1666 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1667 myaddr, memaddr, len) == len)
1668 return 0;
1669 else
1670 return TARGET_XFER_E_IO;
1671 }
1672
1673 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1674 Returns either 0 for success or TARGET_XFER_E_IO if any
1675 error occurs. If an error occurs, no guarantee is made about how
1676 much data got written. Callers that can deal with partial writes
1677 should call target_write. */
1678
1679 int
1680 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1681 {
1682 /* See comment in target_read_memory about why the request starts at
1683 current_target.beneath. */
1684 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1685 myaddr, memaddr, len) == len)
1686 return 0;
1687 else
1688 return TARGET_XFER_E_IO;
1689 }
1690
1691 /* Write LEN bytes from MYADDR to target raw memory at address
1692 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1693 if any error occurs. If an error occurs, no guarantee is made
1694 about how much data got written. Callers that can deal with
1695 partial writes should call target_write. */
1696
1697 int
1698 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1699 {
1700 /* See comment in target_read_memory about why the request starts at
1701 current_target.beneath. */
1702 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1703 myaddr, memaddr, len) == len)
1704 return 0;
1705 else
1706 return TARGET_XFER_E_IO;
1707 }
1708
1709 /* Fetch the target's memory map. */
1710
1711 VEC(mem_region_s) *
1712 target_memory_map (void)
1713 {
1714 VEC(mem_region_s) *result;
1715 struct mem_region *last_one, *this_one;
1716 int ix;
1717 struct target_ops *t;
1718
1719 if (targetdebug)
1720 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1721
1722 for (t = current_target.beneath; t != NULL; t = t->beneath)
1723 if (t->to_memory_map != NULL)
1724 break;
1725
1726 if (t == NULL)
1727 return NULL;
1728
1729 result = t->to_memory_map (t);
1730 if (result == NULL)
1731 return NULL;
1732
1733 qsort (VEC_address (mem_region_s, result),
1734 VEC_length (mem_region_s, result),
1735 sizeof (struct mem_region), mem_region_cmp);
1736
1737 /* Check that regions do not overlap. Simultaneously assign
1738 a numbering for the "mem" commands to use to refer to
1739 each region. */
1740 last_one = NULL;
1741 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1742 {
1743 this_one->number = ix;
1744
1745 if (last_one && last_one->hi > this_one->lo)
1746 {
1747 warning (_("Overlapping regions in memory map: ignoring"));
1748 VEC_free (mem_region_s, result);
1749 return NULL;
1750 }
1751 last_one = this_one;
1752 }
1753
1754 return result;
1755 }
1756
1757 void
1758 target_flash_erase (ULONGEST address, LONGEST length)
1759 {
1760 struct target_ops *t;
1761
1762 for (t = current_target.beneath; t != NULL; t = t->beneath)
1763 if (t->to_flash_erase != NULL)
1764 {
1765 if (targetdebug)
1766 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1767 hex_string (address), phex (length, 0));
1768 t->to_flash_erase (t, address, length);
1769 return;
1770 }
1771
1772 tcomplain ();
1773 }
1774
1775 void
1776 target_flash_done (void)
1777 {
1778 struct target_ops *t;
1779
1780 for (t = current_target.beneath; t != NULL; t = t->beneath)
1781 if (t->to_flash_done != NULL)
1782 {
1783 if (targetdebug)
1784 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1785 t->to_flash_done (t);
1786 return;
1787 }
1788
1789 tcomplain ();
1790 }
1791
/* Show-hook for the "trust-readonly-sections" setting; prints the
   current VALUE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1800
1801 /* More generic transfers. */
1802
1803 static enum target_xfer_status
1804 default_xfer_partial (struct target_ops *ops, enum target_object object,
1805 const char *annex, gdb_byte *readbuf,
1806 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1807 ULONGEST *xfered_len)
1808 {
1809 if (object == TARGET_OBJECT_MEMORY
1810 && ops->deprecated_xfer_memory != NULL)
1811 /* If available, fall back to the target's
1812 "deprecated_xfer_memory" method. */
1813 {
1814 int xfered = -1;
1815
1816 errno = 0;
1817 if (writebuf != NULL)
1818 {
1819 void *buffer = xmalloc (len);
1820 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1821
1822 memcpy (buffer, writebuf, len);
1823 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1824 1/*write*/, NULL, ops);
1825 do_cleanups (cleanup);
1826 }
1827 if (readbuf != NULL)
1828 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1829 0/*read*/, NULL, ops);
1830 if (xfered > 0)
1831 {
1832 *xfered_len = (ULONGEST) xfered;
1833 return TARGET_XFER_E_IO;
1834 }
1835 else if (xfered == 0 && errno == 0)
1836 /* "deprecated_xfer_memory" uses 0, cross checked against
1837 ERRNO as one indication of an error. */
1838 return TARGET_XFER_EOF;
1839 else
1840 return TARGET_XFER_E_IO;
1841 }
1842 else
1843 {
1844 gdb_assert (ops->beneath != NULL);
1845 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1846 readbuf, writebuf, offset, len,
1847 xfered_len);
1848 }
1849 }
1850
1851 /* Target vector read/write partial wrapper functions. */
1852
1853 static enum target_xfer_status
1854 target_read_partial (struct target_ops *ops,
1855 enum target_object object,
1856 const char *annex, gdb_byte *buf,
1857 ULONGEST offset, ULONGEST len,
1858 ULONGEST *xfered_len)
1859 {
1860 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1861 xfered_len);
1862 }
1863
1864 static enum target_xfer_status
1865 target_write_partial (struct target_ops *ops,
1866 enum target_object object,
1867 const char *annex, const gdb_byte *buf,
1868 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1869 {
1870 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1871 xfered_len);
1872 }
1873
1874 /* Wrappers to perform the full transfer. */
1875
1876 /* For docs on target_read see target.h. */
1877
1878 LONGEST
1879 target_read (struct target_ops *ops,
1880 enum target_object object,
1881 const char *annex, gdb_byte *buf,
1882 ULONGEST offset, LONGEST len)
1883 {
1884 LONGEST xfered = 0;
1885
1886 while (xfered < len)
1887 {
1888 ULONGEST xfered_len;
1889 enum target_xfer_status status;
1890
1891 status = target_read_partial (ops, object, annex,
1892 (gdb_byte *) buf + xfered,
1893 offset + xfered, len - xfered,
1894 &xfered_len);
1895
1896 /* Call an observer, notifying them of the xfer progress? */
1897 if (status == TARGET_XFER_EOF)
1898 return xfered;
1899 else if (status == TARGET_XFER_OK)
1900 {
1901 xfered += xfered_len;
1902 QUIT;
1903 }
1904 else
1905 return -1;
1906
1907 }
1908 return len;
1909 }
1910
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning, or end, nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  Explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;			/* Non-zero: readable part is at the front.  */
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Binary-search for the boundary between readable and unreadable
     memory.  Loop invariant is that the [current_begin, current_end) was
     previously found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is the half adjacent to the byte we know is
	 readable; "second half" is the other one.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the next
	     iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  BUF itself is
	 handed to the caller via R.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy just that
	 tail out of BUF, which is then freed.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2045
2046 void
2047 free_memory_read_result_vector (void *x)
2048 {
2049 VEC(memory_read_result_s) *v = x;
2050 memory_read_result_s *current;
2051 int ix;
2052
2053 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2054 {
2055 xfree (current->data);
2056 }
2057 VEC_free (memory_read_result_s, v);
2058 }
2059
/* Read LEN bytes of target memory at OFFSET via OPS, tolerating
   unreadable holes.  Returns a vector of memory_read_result structs,
   one per readable subrange actually obtained; the caller owns the
   vector and the data buffers it references (see
   free_memory_read_result_vector).  */

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      /* Walk the request one memory region at a time, since region
	 attributes decide whether a range is readable at all.  */
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      if (region->hi == 0)
	rlen = len - xfered;
      else
	/* NOTE(review): this is the distance from OFFSET, not from the
	   current position (offset + xfered); when xfered > 0 it
	   overstates the bytes remaining in REGION — confirm intended.  */
	rlen = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered += rlen;
	}
      else
	{
	  LONGEST to_read = min (len - xfered, rlen);
	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
				      (gdb_byte *) buffer,
				      offset + xfered, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfer <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  */
	      xfree (buffer);
	      read_whatever_is_readable (ops, offset + xfered,
					 offset + xfered + to_read, &result);
	      xfered += to_read;
	    }
	  else
	    {
	      /* Full or partial success: record the range read and keep
		 going from wherever the transfer stopped.  */
	      struct memory_read_result r;
	      r.data = buffer;
	      r.begin = offset + xfered;
	      r.end = r.begin + xfer;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered += xfer;
	    }
	  /* NOTE(review): if QUIT throws here, RESULT and its buffers
	     leak; consider registering a cleanup.  */
	  QUIT;
	}
    }
  return result;
}
2118
2119
2120 /* An alternative to target_write with progress callbacks. */
2121
2122 LONGEST
2123 target_write_with_progress (struct target_ops *ops,
2124 enum target_object object,
2125 const char *annex, const gdb_byte *buf,
2126 ULONGEST offset, LONGEST len,
2127 void (*progress) (ULONGEST, void *), void *baton)
2128 {
2129 LONGEST xfered = 0;
2130
2131 /* Give the progress callback a chance to set up. */
2132 if (progress)
2133 (*progress) (0, baton);
2134
2135 while (xfered < len)
2136 {
2137 ULONGEST xfered_len;
2138 enum target_xfer_status status;
2139
2140 status = target_write_partial (ops, object, annex,
2141 (gdb_byte *) buf + xfered,
2142 offset + xfered, len - xfered,
2143 &xfered_len);
2144
2145 if (status == TARGET_XFER_EOF)
2146 return xfered;
2147 if (TARGET_XFER_STATUS_ERROR_P (status))
2148 return -1;
2149
2150 gdb_assert (status == TARGET_XFER_OK);
2151 if (progress)
2152 (*progress) (xfered_len, baton);
2153
2154 xfered += xfered_len;
2155 QUIT;
2156 }
2157 return len;
2158 }
2159
2160 /* For docs on target_write see target.h. */
2161
2162 LONGEST
2163 target_write (struct target_ops *ops,
2164 enum target_object object,
2165 const char *annex, const gdb_byte *buf,
2166 ULONGEST offset, LONGEST len)
2167 {
2168 return target_write_with_progress (ops, object, annex, buf, offset, len,
2169 NULL, NULL);
2170 }
2171
2172 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2173 the size of the transferred data. PADDING additional bytes are
2174 available in *BUF_P. This is a helper function for
2175 target_read_alloc; see the declaration of that function for more
2176 information. */
2177
2178 static LONGEST
2179 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2180 const char *annex, gdb_byte **buf_p, int padding)
2181 {
2182 size_t buf_alloc, buf_pos;
2183 gdb_byte *buf;
2184
2185 /* This function does not have a length parameter; it reads the
2186 entire OBJECT). Also, it doesn't support objects fetched partly
2187 from one target and partly from another (in a different stratum,
2188 e.g. a core file and an executable). Both reasons make it
2189 unsuitable for reading memory. */
2190 gdb_assert (object != TARGET_OBJECT_MEMORY);
2191
2192 /* Start by reading up to 4K at a time. The target will throttle
2193 this number down if necessary. */
2194 buf_alloc = 4096;
2195 buf = xmalloc (buf_alloc);
2196 buf_pos = 0;
2197 while (1)
2198 {
2199 ULONGEST xfered_len;
2200 enum target_xfer_status status;
2201
2202 status = target_read_partial (ops, object, annex, &buf[buf_pos],
2203 buf_pos, buf_alloc - buf_pos - padding,
2204 &xfered_len);
2205
2206 if (status == TARGET_XFER_EOF)
2207 {
2208 /* Read all there was. */
2209 if (buf_pos == 0)
2210 xfree (buf);
2211 else
2212 *buf_p = buf;
2213 return buf_pos;
2214 }
2215 else if (status != TARGET_XFER_OK)
2216 {
2217 /* An error occurred. */
2218 xfree (buf);
2219 return TARGET_XFER_E_IO;
2220 }
2221
2222 buf_pos += xfered_len;
2223
2224 /* If the buffer is filling up, expand it. */
2225 if (buf_alloc < buf_pos * 2)
2226 {
2227 buf_alloc *= 2;
2228 buf = xrealloc (buf, buf_alloc);
2229 }
2230
2231 QUIT;
2232 }
2233 }
2234
2235 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2236 the size of the transferred data. See the declaration in "target.h"
2237 function for more information about the return value. */
2238
2239 LONGEST
2240 target_read_alloc (struct target_ops *ops, enum target_object object,
2241 const char *annex, gdb_byte **buf_p)
2242 {
2243 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2244 }
2245
2246 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2247 returned as a string, allocated using xmalloc. If an error occurs
2248 or the transfer is unsupported, NULL is returned. Empty objects
2249 are returned as allocated but empty strings. A warning is issued
2250 if the result contains any embedded NUL bytes. */
2251
2252 char *
2253 target_read_stralloc (struct target_ops *ops, enum target_object object,
2254 const char *annex)
2255 {
2256 gdb_byte *buffer;
2257 char *bufstr;
2258 LONGEST i, transferred;
2259
2260 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2261 bufstr = (char *) buffer;
2262
2263 if (transferred < 0)
2264 return NULL;
2265
2266 if (transferred == 0)
2267 return xstrdup ("");
2268
2269 bufstr[transferred] = 0;
2270
2271 /* Check for embedded NUL bytes; but allow trailing NULs. */
2272 for (i = strlen (bufstr); i < transferred; i++)
2273 if (bufstr[i] != 0)
2274 {
2275 warning (_("target object %d, annex %s, "
2276 "contained unexpected null characters"),
2277 (int) object, annex ? annex : "(none)");
2278 break;
2279 }
2280
2281 return bufstr;
2282 }
2283
2284 /* Memory transfer methods. */
2285
2286 void
2287 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2288 LONGEST len)
2289 {
2290 /* This method is used to read from an alternate, non-current
2291 target. This read must bypass the overlay support (as symbols
2292 don't match this target), and GDB's internal cache (wrong cache
2293 for this target). */
2294 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2295 != len)
2296 memory_error (TARGET_XFER_E_IO, addr);
2297 }
2298
2299 ULONGEST
2300 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2301 int len, enum bfd_endian byte_order)
2302 {
2303 gdb_byte buf[sizeof (ULONGEST)];
2304
2305 gdb_assert (len <= sizeof (buf));
2306 get_target_memory (ops, addr, buf, len);
2307 return extract_unsigned_integer (buf, len, byte_order);
2308 }
2309
2310 /* See target.h. */
2311
2312 int
2313 target_insert_breakpoint (struct gdbarch *gdbarch,
2314 struct bp_target_info *bp_tgt)
2315 {
2316 if (!may_insert_breakpoints)
2317 {
2318 warning (_("May not insert breakpoints"));
2319 return 1;
2320 }
2321
2322 return current_target.to_insert_breakpoint (&current_target,
2323 gdbarch, bp_tgt);
2324 }
2325
2326 /* See target.h. */
2327
2328 int
2329 target_remove_breakpoint (struct gdbarch *gdbarch,
2330 struct bp_target_info *bp_tgt)
2331 {
2332 /* This is kind of a weird case to handle, but the permission might
2333 have been changed after breakpoints were inserted - in which case
2334 we should just take the user literally and assume that any
2335 breakpoints should be left in place. */
2336 if (!may_insert_breakpoints)
2337 {
2338 warning (_("May not remove breakpoints"));
2339 return 1;
2340 }
2341
2342 return current_target.to_remove_breakpoint (&current_target,
2343 gdbarch, bp_tgt);
2344 }
2345
2346 static void
2347 target_info (char *args, int from_tty)
2348 {
2349 struct target_ops *t;
2350 int has_all_mem = 0;
2351
2352 if (symfile_objfile != NULL)
2353 printf_unfiltered (_("Symbols from \"%s\".\n"),
2354 objfile_name (symfile_objfile));
2355
2356 for (t = target_stack; t != NULL; t = t->beneath)
2357 {
2358 if (!(*t->to_has_memory) (t))
2359 continue;
2360
2361 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2362 continue;
2363 if (has_all_mem)
2364 printf_unfiltered (_("\tWhile running this, "
2365 "GDB does not access memory from...\n"));
2366 printf_unfiltered ("%s:\n", t->to_longname);
2367 (t->to_files_info) (t);
2368 has_all_mem = (*t->to_has_all_memory) (t);
2369 }
2370 }
2371
2372 /* This function is called before any new inferior is created, e.g.
2373 by running a program, attaching, or connecting to a target.
2374 It cleans up any state from previous invocations which might
2375 change between runs. This is a subset of what target_preopen
2376 resets (things which might change between targets). */
2377
2378 void
2379 target_pre_inferior (int from_tty)
2380 {
2381 /* Clear out solib state. Otherwise the solib state of the previous
2382 inferior might have survived and is entirely wrong for the new
2383 target. This has been observed on GNU/Linux using glibc 2.3. How
2384 to reproduce:
2385
2386 bash$ ./foo&
2387 [1] 4711
2388 bash$ ./foo&
2389 [1] 4712
2390 bash$ gdb ./foo
2391 [...]
2392 (gdb) attach 4711
2393 (gdb) detach
2394 (gdb) attach 4712
2395 Cannot access memory at address 0xdeadbeef
2396 */
2397
2398 /* In some OSs, the shared library list is the same/global/shared
2399 across inferiors. If code is shared between processes, so are
2400 memory regions and features. */
2401 if (!gdbarch_has_global_solist (target_gdbarch ()))
2402 {
2403 no_shared_libraries (NULL, from_tty);
2404
2405 invalidate_target_mem_regions ();
2406
2407 target_clear_description ();
2408 }
2409
2410 agent_capability_invalidate ();
2411 }
2412
2413 /* Callback for iterate_over_inferiors. Gets rid of the given
2414 inferior. */
2415
2416 static int
2417 dispose_inferior (struct inferior *inf, void *args)
2418 {
2419 struct thread_info *thread;
2420
2421 thread = any_thread_of_process (inf->pid);
2422 if (thread)
2423 {
2424 switch_to_thread (thread->ptid);
2425
2426 /* Core inferiors actually should be detached, not killed. */
2427 if (target_has_execution)
2428 target_kill ();
2429 else
2430 target_detach (NULL, 0);
2431 }
2432
2433 return 0;
2434 }
2435
2436 /* This is to be called by the open routine before it does
2437 anything. */
2438
2439 void
2440 target_preopen (int from_tty)
2441 {
2442 dont_repeat ();
2443
2444 if (have_inferiors ())
2445 {
2446 if (!from_tty
2447 || !have_live_inferiors ()
2448 || query (_("A program is being debugged already. Kill it? ")))
2449 iterate_over_inferiors (dispose_inferior, NULL);
2450 else
2451 error (_("Program not killed."));
2452 }
2453
2454 /* Calling target_kill may remove the target from the stack. But if
2455 it doesn't (which seems like a win for UDI), remove it now. */
2456 /* Leave the exec target, though. The user may be switching from a
2457 live process to a core of the same program. */
2458 pop_all_targets_above (file_stratum);
2459
2460 target_pre_inferior (from_tty);
2461 }
2462
2463 /* Detach a target after doing deferred register stores. */
2464
2465 void
2466 target_detach (const char *args, int from_tty)
2467 {
2468 struct target_ops* t;
2469
2470 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2471 /* Don't remove global breakpoints here. They're removed on
2472 disconnection from the target. */
2473 ;
2474 else
2475 /* If we're in breakpoints-always-inserted mode, have to remove
2476 them before detaching. */
2477 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2478
2479 prepare_for_detach ();
2480
2481 current_target.to_detach (&current_target, args, from_tty);
2482 if (targetdebug)
2483 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2484 args, from_tty);
2485 }
2486
2487 void
2488 target_disconnect (char *args, int from_tty)
2489 {
2490 struct target_ops *t;
2491
2492 /* If we're in breakpoints-always-inserted mode or if breakpoints
2493 are global across processes, we have to remove them before
2494 disconnecting. */
2495 remove_breakpoints ();
2496
2497 for (t = current_target.beneath; t != NULL; t = t->beneath)
2498 if (t->to_disconnect != NULL)
2499 {
2500 if (targetdebug)
2501 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2502 args, from_tty);
2503 t->to_disconnect (t, args, from_tty);
2504 return;
2505 }
2506
2507 tcomplain ();
2508 }
2509
2510 ptid_t
2511 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2512 {
2513 struct target_ops *t;
2514 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2515 status, options);
2516
2517 if (targetdebug)
2518 {
2519 char *status_string;
2520 char *options_string;
2521
2522 status_string = target_waitstatus_to_string (status);
2523 options_string = target_options_to_string (options);
2524 fprintf_unfiltered (gdb_stdlog,
2525 "target_wait (%d, status, options={%s})"
2526 " = %d, %s\n",
2527 ptid_get_pid (ptid), options_string,
2528 ptid_get_pid (retval), status_string);
2529 xfree (status_string);
2530 xfree (options_string);
2531 }
2532
2533 return retval;
2534 }
2535
2536 char *
2537 target_pid_to_str (ptid_t ptid)
2538 {
2539 return (*current_target.to_pid_to_str) (&current_target, ptid);
2540 }
2541
2542 char *
2543 target_thread_name (struct thread_info *info)
2544 {
2545 return current_target.to_thread_name (&current_target, info);
2546 }
2547
2548 void
2549 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2550 {
2551 struct target_ops *t;
2552
2553 target_dcache_invalidate ();
2554
2555 current_target.to_resume (&current_target, ptid, step, signal);
2556 if (targetdebug)
2557 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2558 ptid_get_pid (ptid),
2559 step ? "step" : "continue",
2560 gdb_signal_to_name (signal));
2561
2562 registers_changed_ptid (ptid);
2563 set_executing (ptid, 1);
2564 set_running (ptid, 1);
2565 clear_inline_frame_state (ptid);
2566 }
2567
2568 void
2569 target_pass_signals (int numsigs, unsigned char *pass_signals)
2570 {
2571 if (targetdebug)
2572 {
2573 int i;
2574
2575 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2576 numsigs);
2577
2578 for (i = 0; i < numsigs; i++)
2579 if (pass_signals[i])
2580 fprintf_unfiltered (gdb_stdlog, " %s",
2581 gdb_signal_to_name (i));
2582
2583 fprintf_unfiltered (gdb_stdlog, " })\n");
2584 }
2585
2586 (*current_target.to_pass_signals) (&current_target, numsigs, pass_signals);
2587 }
2588
2589 void
2590 target_program_signals (int numsigs, unsigned char *program_signals)
2591 {
2592 if (targetdebug)
2593 {
2594 int i;
2595
2596 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2597 numsigs);
2598
2599 for (i = 0; i < numsigs; i++)
2600 if (program_signals[i])
2601 fprintf_unfiltered (gdb_stdlog, " %s",
2602 gdb_signal_to_name (i));
2603
2604 fprintf_unfiltered (gdb_stdlog, " })\n");
2605 }
2606
2607 (*current_target.to_program_signals) (&current_target,
2608 numsigs, program_signals);
2609 }
2610
/* Default implementation of to_follow_fork.  Reaching this means some
   target reported a fork event without providing a way to follow it,
   which is an internal inconsistency.  */

static int
default_follow_fork (struct target_ops *self, int follow_child,
		     int detach_fork)
{
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to follow fork"));
}
2619
2620 /* Look through the list of possible targets for a target that can
2621 follow forks. */
2622
2623 int
2624 target_follow_fork (int follow_child, int detach_fork)
2625 {
2626 int retval = current_target.to_follow_fork (&current_target,
2627 follow_child, detach_fork);
2628
2629 if (targetdebug)
2630 fprintf_unfiltered (gdb_stdlog,
2631 "target_follow_fork (%d, %d) = %d\n",
2632 follow_child, detach_fork, retval);
2633 return retval;
2634 }
2635
/* Default implementation of to_mourn_inferior.  Reaching this means
   no pushed target knew how to mourn the inferior, which is an
   internal inconsistency.  */

static void
default_mourn_inferior (struct target_ops *self)
{
  /* Fix: the previous message read "follow mourn inferior", an
     apparent copy-paste from default_follow_fork.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to mourn inferior"));
}
2642
2643 void
2644 target_mourn_inferior (void)
2645 {
2646 current_target.to_mourn_inferior (&current_target);
2647 if (targetdebug)
2648 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2649
2650 /* We no longer need to keep handles on any of the object files.
2651 Make sure to release them to avoid unnecessarily locking any
2652 of them while we're not actually debugging. */
2653 bfd_cache_close_all ();
2654 }
2655
2656 /* Look for a target which can describe architectural features, starting
2657 from TARGET. If we find one, return its description. */
2658
2659 const struct target_desc *
2660 target_read_description (struct target_ops *target)
2661 {
2662 struct target_ops *t;
2663
2664 for (t = target; t != NULL; t = t->beneath)
2665 if (t->to_read_description != NULL)
2666 {
2667 const struct target_desc *tdesc;
2668
2669 tdesc = t->to_read_description (t);
2670 if (tdesc)
2671 return tdesc;
2672 }
2673
2674 return NULL;
2675 }
2676
/* The default implementation of to_search_memory.
   This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search on
   the target side with, for example, gdbserver).

   Searches SEARCH_SPACE_LEN bytes starting at START_ADDR for the byte
   sequence PATTERN/PATTERN_LEN.  Returns 1 and sets *FOUND_ADDRP on a
   match, 0 if not found, and -1 on a memory read error.  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* PATTERN_LEN - 1 extra bytes let a match straddling a chunk
     boundary be found within a single memmem call.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  /* SEARCH_BUF always begins at START_ADDR, so the buffer
	     offset of the match equals its address offset.  */
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2784
2785 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2786 sequence of bytes in PATTERN with length PATTERN_LEN.
2787
2788 The result is 1 if found, 0 if not found, and -1 if there was an error
2789 requiring halting of the search (e.g. memory read error).
2790 If the pattern is found the address is recorded in FOUND_ADDRP. */
2791
2792 int
2793 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2794 const gdb_byte *pattern, ULONGEST pattern_len,
2795 CORE_ADDR *found_addrp)
2796 {
2797 struct target_ops *t;
2798 int found;
2799
2800 /* We don't use INHERIT to set current_target.to_search_memory,
2801 so we have to scan the target stack and handle targetdebug
2802 ourselves. */
2803
2804 if (targetdebug)
2805 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2806 hex_string (start_addr));
2807
2808 for (t = current_target.beneath; t != NULL; t = t->beneath)
2809 if (t->to_search_memory != NULL)
2810 break;
2811
2812 if (t != NULL)
2813 {
2814 found = t->to_search_memory (t, start_addr, search_space_len,
2815 pattern, pattern_len, found_addrp);
2816 }
2817 else
2818 {
2819 /* If a special version of to_search_memory isn't available, use the
2820 simple version. */
2821 found = simple_search_memory (current_target.beneath,
2822 start_addr, search_space_len,
2823 pattern, pattern_len, found_addrp);
2824 }
2825
2826 if (targetdebug)
2827 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2828
2829 return found;
2830 }
2831
2832 /* Look through the currently pushed targets. If none of them will
2833 be able to restart the currently running process, issue an error
2834 message. */
2835
2836 void
2837 target_require_runnable (void)
2838 {
2839 struct target_ops *t;
2840
2841 for (t = target_stack; t != NULL; t = t->beneath)
2842 {
2843 /* If this target knows how to create a new program, then
2844 assume we will still be able to after killing the current
2845 one. Either killing and mourning will not pop T, or else
2846 find_default_run_target will find it again. */
2847 if (t->to_create_inferior != NULL)
2848 return;
2849
2850 /* Do not worry about thread_stratum targets that can not
2851 create inferiors. Assume they will be pushed again if
2852 necessary, and continue to the process_stratum. */
2853 if (t->to_stratum == thread_stratum
2854 || t->to_stratum == arch_stratum)
2855 continue;
2856
2857 error (_("The \"%s\" target does not support \"run\". "
2858 "Try \"help target\" or \"continue\"."),
2859 t->to_shortname);
2860 }
2861
2862 /* This function is only called if the target is running. In that
2863 case there should have been a process_stratum target and it
2864 should either know how to create inferiors, or not... */
2865 internal_error (__FILE__, __LINE__, _("No targets found"));
2866 }
2867
2868 /* Look through the list of possible targets for a target that can
2869 execute a run or attach command without any other data. This is
2870 used to locate the default process stratum.
2871
2872 If DO_MESG is not NULL, the result is always valid (error() is
2873 called for errors); else, return NULL on error. */
2874
2875 static struct target_ops *
2876 find_default_run_target (char *do_mesg)
2877 {
2878 struct target_ops **t;
2879 struct target_ops *runable = NULL;
2880 int count;
2881
2882 count = 0;
2883
2884 for (t = target_structs; t < target_structs + target_struct_size;
2885 ++t)
2886 {
2887 if ((*t)->to_can_run && target_can_run (*t))
2888 {
2889 runable = *t;
2890 ++count;
2891 }
2892 }
2893
2894 if (count != 1)
2895 {
2896 if (do_mesg)
2897 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2898 else
2899 return NULL;
2900 }
2901
2902 return runable;
2903 }
2904
2905 void
2906 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2907 {
2908 struct target_ops *t;
2909
2910 t = find_default_run_target ("attach");
2911 (t->to_attach) (t, args, from_tty);
2912 return;
2913 }
2914
2915 void
2916 find_default_create_inferior (struct target_ops *ops,
2917 char *exec_file, char *allargs, char **env,
2918 int from_tty)
2919 {
2920 struct target_ops *t;
2921
2922 t = find_default_run_target ("run");
2923 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2924 return;
2925 }
2926
2927 static int
2928 find_default_can_async_p (struct target_ops *ignore)
2929 {
2930 struct target_ops *t;
2931
2932 /* This may be called before the target is pushed on the stack;
2933 look for the default process stratum. If there's none, gdb isn't
2934 configured with a native debugger, and target remote isn't
2935 connected yet. */
2936 t = find_default_run_target (NULL);
2937 if (t && t->to_can_async_p != delegate_can_async_p)
2938 return (t->to_can_async_p) (t);
2939 return 0;
2940 }
2941
2942 static int
2943 find_default_is_async_p (struct target_ops *ignore)
2944 {
2945 struct target_ops *t;
2946
2947 /* This may be called before the target is pushed on the stack;
2948 look for the default process stratum. If there's none, gdb isn't
2949 configured with a native debugger, and target remote isn't
2950 connected yet. */
2951 t = find_default_run_target (NULL);
2952 if (t && t->to_is_async_p != delegate_is_async_p)
2953 return (t->to_is_async_p) (t);
2954 return 0;
2955 }
2956
2957 static int
2958 find_default_supports_non_stop (struct target_ops *self)
2959 {
2960 struct target_ops *t;
2961
2962 t = find_default_run_target (NULL);
2963 if (t && t->to_supports_non_stop)
2964 return (t->to_supports_non_stop) (t);
2965 return 0;
2966 }
2967
2968 int
2969 target_supports_non_stop (void)
2970 {
2971 struct target_ops *t;
2972
2973 for (t = &current_target; t != NULL; t = t->beneath)
2974 if (t->to_supports_non_stop)
2975 return t->to_supports_non_stop (t);
2976
2977 return 0;
2978 }
2979
2980 /* Implement the "info proc" command. */
2981
2982 int
2983 target_info_proc (char *args, enum info_proc_what what)
2984 {
2985 struct target_ops *t;
2986
2987 /* If we're already connected to something that can get us OS
2988 related data, use it. Otherwise, try using the native
2989 target. */
2990 if (current_target.to_stratum >= process_stratum)
2991 t = current_target.beneath;
2992 else
2993 t = find_default_run_target (NULL);
2994
2995 for (; t != NULL; t = t->beneath)
2996 {
2997 if (t->to_info_proc != NULL)
2998 {
2999 t->to_info_proc (t, args, what);
3000
3001 if (targetdebug)
3002 fprintf_unfiltered (gdb_stdlog,
3003 "target_info_proc (\"%s\", %d)\n", args, what);
3004
3005 return 1;
3006 }
3007 }
3008
3009 return 0;
3010 }
3011
3012 static int
3013 find_default_supports_disable_randomization (struct target_ops *self)
3014 {
3015 struct target_ops *t;
3016
3017 t = find_default_run_target (NULL);
3018 if (t && t->to_supports_disable_randomization)
3019 return (t->to_supports_disable_randomization) (t);
3020 return 0;
3021 }
3022
3023 int
3024 target_supports_disable_randomization (void)
3025 {
3026 struct target_ops *t;
3027
3028 for (t = &current_target; t != NULL; t = t->beneath)
3029 if (t->to_supports_disable_randomization)
3030 return t->to_supports_disable_randomization (t);
3031
3032 return 0;
3033 }
3034
3035 char *
3036 target_get_osdata (const char *type)
3037 {
3038 struct target_ops *t;
3039
3040 /* If we're already connected to something that can get us OS
3041 related data, use it. Otherwise, try using the native
3042 target. */
3043 if (current_target.to_stratum >= process_stratum)
3044 t = current_target.beneath;
3045 else
3046 t = find_default_run_target ("get OS data");
3047
3048 if (!t)
3049 return NULL;
3050
3051 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3052 }
3053
3054 /* Determine the current address space of thread PTID. */
3055
3056 struct address_space *
3057 target_thread_address_space (ptid_t ptid)
3058 {
3059 struct address_space *aspace;
3060 struct inferior *inf;
3061 struct target_ops *t;
3062
3063 for (t = current_target.beneath; t != NULL; t = t->beneath)
3064 {
3065 if (t->to_thread_address_space != NULL)
3066 {
3067 aspace = t->to_thread_address_space (t, ptid);
3068 gdb_assert (aspace);
3069
3070 if (targetdebug)
3071 fprintf_unfiltered (gdb_stdlog,
3072 "target_thread_address_space (%s) = %d\n",
3073 target_pid_to_str (ptid),
3074 address_space_num (aspace));
3075 return aspace;
3076 }
3077 }
3078
3079 /* Fall-back to the "main" address space of the inferior. */
3080 inf = find_inferior_pid (ptid_get_pid (ptid));
3081
3082 if (inf == NULL || inf->aspace == NULL)
3083 internal_error (__FILE__, __LINE__,
3084 _("Can't determine the current "
3085 "address space of thread %s\n"),
3086 target_pid_to_str (ptid));
3087
3088 return inf->aspace;
3089 }
3090
3091
3092 /* Target file operations. */
3093
3094 static struct target_ops *
3095 default_fileio_target (void)
3096 {
3097 /* If we're already connected to something that can perform
3098 file I/O, use it. Otherwise, try using the native target. */
3099 if (current_target.to_stratum >= process_stratum)
3100 return current_target.beneath;
3101 else
3102 return find_default_run_target ("file I/O");
3103 }
3104
3105 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3106 target file descriptor, or -1 if an error occurs (and set
3107 *TARGET_ERRNO). */
3108 int
3109 target_fileio_open (const char *filename, int flags, int mode,
3110 int *target_errno)
3111 {
3112 struct target_ops *t;
3113
3114 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3115 {
3116 if (t->to_fileio_open != NULL)
3117 {
3118 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
3119
3120 if (targetdebug)
3121 fprintf_unfiltered (gdb_stdlog,
3122 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3123 filename, flags, mode,
3124 fd, fd != -1 ? 0 : *target_errno);
3125 return fd;
3126 }
3127 }
3128
3129 *target_errno = FILEIO_ENOSYS;
3130 return -1;
3131 }
3132
3133 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3134 Return the number of bytes written, or -1 if an error occurs
3135 (and set *TARGET_ERRNO). */
3136 int
3137 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3138 ULONGEST offset, int *target_errno)
3139 {
3140 struct target_ops *t;
3141
3142 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3143 {
3144 if (t->to_fileio_pwrite != NULL)
3145 {
3146 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
3147 target_errno);
3148
3149 if (targetdebug)
3150 fprintf_unfiltered (gdb_stdlog,
3151 "target_fileio_pwrite (%d,...,%d,%s) "
3152 "= %d (%d)\n",
3153 fd, len, pulongest (offset),
3154 ret, ret != -1 ? 0 : *target_errno);
3155 return ret;
3156 }
3157 }
3158
3159 *target_errno = FILEIO_ENOSYS;
3160 return -1;
3161 }
3162
3163 /* Read up to LEN bytes FD on the target into READ_BUF.
3164 Return the number of bytes read, or -1 if an error occurs
3165 (and set *TARGET_ERRNO). */
3166 int
3167 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3168 ULONGEST offset, int *target_errno)
3169 {
3170 struct target_ops *t;
3171
3172 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3173 {
3174 if (t->to_fileio_pread != NULL)
3175 {
3176 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
3177 target_errno);
3178
3179 if (targetdebug)
3180 fprintf_unfiltered (gdb_stdlog,
3181 "target_fileio_pread (%d,...,%d,%s) "
3182 "= %d (%d)\n",
3183 fd, len, pulongest (offset),
3184 ret, ret != -1 ? 0 : *target_errno);
3185 return ret;
3186 }
3187 }
3188
3189 *target_errno = FILEIO_ENOSYS;
3190 return -1;
3191 }
3192
3193 /* Close FD on the target. Return 0, or -1 if an error occurs
3194 (and set *TARGET_ERRNO). */
3195 int
3196 target_fileio_close (int fd, int *target_errno)
3197 {
3198 struct target_ops *t;
3199
3200 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3201 {
3202 if (t->to_fileio_close != NULL)
3203 {
3204 int ret = t->to_fileio_close (t, fd, target_errno);
3205
3206 if (targetdebug)
3207 fprintf_unfiltered (gdb_stdlog,
3208 "target_fileio_close (%d) = %d (%d)\n",
3209 fd, ret, ret != -1 ? 0 : *target_errno);
3210 return ret;
3211 }
3212 }
3213
3214 *target_errno = FILEIO_ENOSYS;
3215 return -1;
3216 }
3217
3218 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3219 occurs (and set *TARGET_ERRNO). */
3220 int
3221 target_fileio_unlink (const char *filename, int *target_errno)
3222 {
3223 struct target_ops *t;
3224
3225 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3226 {
3227 if (t->to_fileio_unlink != NULL)
3228 {
3229 int ret = t->to_fileio_unlink (t, filename, target_errno);
3230
3231 if (targetdebug)
3232 fprintf_unfiltered (gdb_stdlog,
3233 "target_fileio_unlink (%s) = %d (%d)\n",
3234 filename, ret, ret != -1 ? 0 : *target_errno);
3235 return ret;
3236 }
3237 }
3238
3239 *target_errno = FILEIO_ENOSYS;
3240 return -1;
3241 }
3242
3243 /* Read value of symbolic link FILENAME on the target. Return a
3244 null-terminated string allocated via xmalloc, or NULL if an error
3245 occurs (and set *TARGET_ERRNO). */
3246 char *
3247 target_fileio_readlink (const char *filename, int *target_errno)
3248 {
3249 struct target_ops *t;
3250
3251 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3252 {
3253 if (t->to_fileio_readlink != NULL)
3254 {
3255 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3256
3257 if (targetdebug)
3258 fprintf_unfiltered (gdb_stdlog,
3259 "target_fileio_readlink (%s) = %s (%d)\n",
3260 filename, ret? ret : "(nil)",
3261 ret? 0 : *target_errno);
3262 return ret;
3263 }
3264 }
3265
3266 *target_errno = FILEIO_ENOSYS;
3267 return NULL;
3268 }
3269
/* Cleanup callback closing a target File-I/O descriptor.  OPAQUE
   points at the int file descriptor to close.  Any close error is
   deliberately ignored, as befits a cleanup.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int fd = *(int *) opaque;
  int target_errno;

  target_fileio_close (fd, &target_errno);
}
3278
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Ensure the descriptor is closed on any exit path, including via
     the QUIT below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Leave PADDING bytes of headroom at the end of the buffer for
	 the caller (e.g. a trailing NUL).  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  Hand the buffer to the caller only
	     if something was actually read.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
3342
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* No padding needed: the caller gets the raw bytes.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3352
/* Read target file FILENAME.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

char *
target_fileio_read_stralloc (const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Request one byte of padding so there is room for the NUL
     terminator appended below.  */
  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  return bufstr;
}
3389
3390
/* Default implementation of to_region_ok_for_hw_watchpoint: accept
   any region no wider than a pointer on the current target
   architecture.  */
static int
default_region_ok_for_hw_watchpoint (struct target_ops *self,
				     CORE_ADDR addr, int len)
{
  return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
}
3397
/* Default implementation of to_watchpoint_addr_within_range: true
   iff ADDR lies in the half-open interval [START, START + LENGTH).  */
static int
default_watchpoint_addr_within_range (struct target_ops *target,
				      CORE_ADDR addr,
				      CORE_ADDR start, int length)
{
  return addr >= start && addr < start + length;
}
3405
/* Default implementation of to_thread_architecture: every thread
   uses the inferior's architecture.  */
static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}
3411
/* Always return zero.  Installed (via function-pointer casts, see
   init_dummy_target) as the default for various int-valued target
   methods.  */
static int
return_zero (void)
{
  return 0;
}
3417
/*
 * Find the next target down the stack from the specified target.
 */

struct target_ops *
find_target_beneath (struct target_ops *t)
{
  return t->beneath;
}
3427
3428 /* See target.h. */
3429
3430 struct target_ops *
3431 find_target_at (enum strata stratum)
3432 {
3433 struct target_ops *t;
3434
3435 for (t = current_target.beneath; t != NULL; t = t->beneath)
3436 if (t->to_stratum == stratum)
3437 return t;
3438
3439 return NULL;
3440 }
3441
3442 \f
/* The inferior process has died.  Long live the inferior!  */

/* Generic implementation of to_mourn_inferior: forget the current
   inferior, its threads, breakpoints and register caches, and re-read
   the executable.  The ordering of the steps below is significant.  */
void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3477 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer.  NOTE: the buffer is shared by all callers, so the
   result is only valid until the next call.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3489
/* Default implementation of to_pid_to_str: print "process NNN".  */
static char *
default_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3495
/* Error-catcher for target_find_memory_regions.  */
/* Installed on the dummy target; always errors out.  The return is
   unreachable but keeps the signature honest.  */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  return 0;
}
3504
/* Error-catcher for target_make_corefile_notes.  */
/* Installed on the dummy target; always errors out.  The return is
   unreachable but keeps the signature honest.  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  return NULL;
}
3513
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target sits at the bottom of every target stack;
   install_dummy_methods fills the remaining slots with defaults.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_stratum = dummy_stratum;
  /* The casts adapt the zero-argument return_zero to each slot's
     exact prototype; the arguments are simply ignored.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  install_dummy_methods (&dummy_target);
}
3538 \f
/* Debug wrapper for to_open: forward to the real method saved in
   debug_target, then log the call to gdb_stdlog.  */
static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3546
/* Close TARG, preferring the extended to_xclose (which also
   destroys the target) over plain to_close.  TARG must already have
   been unpushed.  */
void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3560
/* Attach to a process described by ARGS via the current target,
   logging the call when target debugging is enabled.  */
void
target_attach (char *args, int from_tty)
{
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
			args, from_tty);
}
3569
3570 int
3571 target_thread_alive (ptid_t ptid)
3572 {
3573 struct target_ops *t;
3574
3575 for (t = current_target.beneath; t != NULL; t = t->beneath)
3576 {
3577 if (t->to_thread_alive != NULL)
3578 {
3579 int retval;
3580
3581 retval = t->to_thread_alive (t, ptid);
3582 if (targetdebug)
3583 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3584 ptid_get_pid (ptid), retval);
3585
3586 return retval;
3587 }
3588 }
3589
3590 return 0;
3591 }
3592
/* Ask the current target to update GDB's thread list, logging the
   call when target debugging is enabled.  */
void
target_find_new_threads (void)
{
  current_target.to_find_new_threads (&current_target);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
}
3600
/* Stop thread PTID via the current target, unless stopping has been
   disabled with "set may-interrupt off" (the may_stop flag), in which
   case only a warning is printed.  */
void
target_stop (ptid_t ptid)
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  (*current_target.to_stop) (&current_target, ptid);
}
3612
/* Debug wrapper for to_post_attach: forward to the real method saved
   in debug_target, then log the call to gdb_stdlog.  */
static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3620
3621 /* Concatenate ELEM to LIST, a comma separate list, and return the
3622 result. The LIST incoming argument is released. */
3623
3624 static char *
3625 str_comma_list_concat_elem (char *list, const char *elem)
3626 {
3627 if (list == NULL)
3628 return xstrdup (elem);
3629 else
3630 return reconcat (list, list, ", ", elem, (char *) NULL);
3631 }
3632
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  if ((*target_options & opt) != 0)
    {
      ret = str_comma_list_concat_elem (ret, opt_str);
      /* Clear the bit so the caller can detect leftover unknown
	 options.  */
      *target_options &= ~opt;
    }

  return ret;
}
3650
/* Render TARGET_OPTIONS (a mask of TARGET_* wait flags) as a
   comma-separated xmalloc'd string.  Unknown bits are reported as
   "unknown???".  The caller owns the returned string.  */
char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

  /* Stringify OPT using its own macro name.  */
#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  /* do_option clears handled bits, so anything left is unknown.  */
  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3668
/* Log to gdb_stdlog a line of the form "FUNC (REG) = HEXBYTES ..."
   describing register REGNO of REGCACHE.  Used by the fetch/store
   register debug paths.  */
static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the register's name when it has one; fall back to the
     raw number.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* Also print small registers as an integer value.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3705
/* Fetch register REGNO (all registers if REGNO == -1) from the
   current target into REGCACHE, logging when target debugging is
   enabled.  */
void
target_fetch_registers (struct regcache *regcache, int regno)
{
  current_target.to_fetch_registers (&current_target, regcache, regno);
  if (targetdebug)
    debug_print_register ("target_fetch_registers", regcache, regno);
}
3713
3714 void
3715 target_store_registers (struct regcache *regcache, int regno)
3716 {
3717 struct target_ops *t;
3718
3719 if (!may_write_registers)
3720 error (_("Writing to registers is not allowed (regno %d)"), regno);
3721
3722 current_target.to_store_registers (&current_target, regcache, regno);
3723 if (targetdebug)
3724 {
3725 debug_print_register ("target_store_registers", regcache, regno);
3726 }
3727 }
3728
3729 int
3730 target_core_of_thread (ptid_t ptid)
3731 {
3732 struct target_ops *t;
3733
3734 for (t = current_target.beneath; t != NULL; t = t->beneath)
3735 {
3736 if (t->to_core_of_thread != NULL)
3737 {
3738 int retval = t->to_core_of_thread (t, ptid);
3739
3740 if (targetdebug)
3741 fprintf_unfiltered (gdb_stdlog,
3742 "target_core_of_thread (%d) = %d\n",
3743 ptid_get_pid (ptid), retval);
3744 return retval;
3745 }
3746 }
3747
3748 return -1;
3749 }
3750
3751 int
3752 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3753 {
3754 struct target_ops *t;
3755
3756 for (t = current_target.beneath; t != NULL; t = t->beneath)
3757 {
3758 if (t->to_verify_memory != NULL)
3759 {
3760 int retval = t->to_verify_memory (t, data, memaddr, size);
3761
3762 if (targetdebug)
3763 fprintf_unfiltered (gdb_stdlog,
3764 "target_verify_memory (%s, %s) = %d\n",
3765 paddress (target_gdbarch (), memaddr),
3766 pulongest (size),
3767 retval);
3768 return retval;
3769 }
3770 }
3771
3772 tcomplain ();
3773 }
3774
/* The documentation for this function is in its prototype declaration in
   target.h.  Inserts a masked watchpoint via the current target and
   logs the result when target debugging is enabled.  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  int ret;

  ret = current_target.to_insert_mask_watchpoint (&current_target,
						  addr, mask, rw);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "\
target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
			core_addr_to_string (addr),
			core_addr_to_string (mask), rw, ret);
  
  return ret;
}
3794
/* The documentation for this function is in its prototype declaration in
   target.h.  Removes a masked watchpoint via the current target and
   logs the result when target debugging is enabled.  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  int ret;

  ret = current_target.to_remove_mask_watchpoint (&current_target,
						  addr, mask, rw);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "\
target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
			core_addr_to_string (addr),
			core_addr_to_string (mask), rw, ret);

  return ret;
}
3814
/* The documentation for this function is in its prototype declaration
   in target.h.  Straight delegation to the current target.  */

int
target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
{
  return current_target.to_masked_watch_num_registers (&current_target,
						       addr, mask);
}
3824
/* The documentation for this function is in its prototype declaration
   in target.h.  Straight delegation to the current target.  */

int
target_ranged_break_num_registers (void)
{
  return current_target.to_ranged_break_num_registers (&current_target);
}
3833
3834 /* See target.h. */
3835
3836 struct btrace_target_info *
3837 target_enable_btrace (ptid_t ptid)
3838 {
3839 struct target_ops *t;
3840
3841 for (t = current_target.beneath; t != NULL; t = t->beneath)
3842 if (t->to_enable_btrace != NULL)
3843 return t->to_enable_btrace (t, ptid);
3844
3845 tcomplain ();
3846 return NULL;
3847 }
3848
3849 /* See target.h. */
3850
3851 void
3852 target_disable_btrace (struct btrace_target_info *btinfo)
3853 {
3854 struct target_ops *t;
3855
3856 for (t = current_target.beneath; t != NULL; t = t->beneath)
3857 if (t->to_disable_btrace != NULL)
3858 {
3859 t->to_disable_btrace (t, btinfo);
3860 return;
3861 }
3862
3863 tcomplain ();
3864 }
3865
3866 /* See target.h. */
3867
3868 void
3869 target_teardown_btrace (struct btrace_target_info *btinfo)
3870 {
3871 struct target_ops *t;
3872
3873 for (t = current_target.beneath; t != NULL; t = t->beneath)
3874 if (t->to_teardown_btrace != NULL)
3875 {
3876 t->to_teardown_btrace (t, btinfo);
3877 return;
3878 }
3879
3880 tcomplain ();
3881 }
3882
3883 /* See target.h. */
3884
3885 enum btrace_error
3886 target_read_btrace (VEC (btrace_block_s) **btrace,
3887 struct btrace_target_info *btinfo,
3888 enum btrace_read_type type)
3889 {
3890 struct target_ops *t;
3891
3892 for (t = current_target.beneath; t != NULL; t = t->beneath)
3893 if (t->to_read_btrace != NULL)
3894 return t->to_read_btrace (t, btrace, btinfo, type);
3895
3896 tcomplain ();
3897 return BTRACE_ERR_NOT_SUPPORTED;
3898 }
3899
3900 /* See target.h. */
3901
3902 void
3903 target_stop_recording (void)
3904 {
3905 struct target_ops *t;
3906
3907 for (t = current_target.beneath; t != NULL; t = t->beneath)
3908 if (t->to_stop_recording != NULL)
3909 {
3910 t->to_stop_recording (t);
3911 return;
3912 }
3913
3914 /* This is optional. */
3915 }
3916
3917 /* See target.h. */
3918
3919 void
3920 target_info_record (void)
3921 {
3922 struct target_ops *t;
3923
3924 for (t = current_target.beneath; t != NULL; t = t->beneath)
3925 if (t->to_info_record != NULL)
3926 {
3927 t->to_info_record (t);
3928 return;
3929 }
3930
3931 tcomplain ();
3932 }
3933
3934 /* See target.h. */
3935
3936 void
3937 target_save_record (const char *filename)
3938 {
3939 struct target_ops *t;
3940
3941 for (t = current_target.beneath; t != NULL; t = t->beneath)
3942 if (t->to_save_record != NULL)
3943 {
3944 t->to_save_record (t, filename);
3945 return;
3946 }
3947
3948 tcomplain ();
3949 }
3950
3951 /* See target.h. */
3952
3953 int
3954 target_supports_delete_record (void)
3955 {
3956 struct target_ops *t;
3957
3958 for (t = current_target.beneath; t != NULL; t = t->beneath)
3959 if (t->to_delete_record != NULL)
3960 return 1;
3961
3962 return 0;
3963 }
3964
3965 /* See target.h. */
3966
3967 void
3968 target_delete_record (void)
3969 {
3970 struct target_ops *t;
3971
3972 for (t = current_target.beneath; t != NULL; t = t->beneath)
3973 if (t->to_delete_record != NULL)
3974 {
3975 t->to_delete_record (t);
3976 return;
3977 }
3978
3979 tcomplain ();
3980 }
3981
3982 /* See target.h. */
3983
3984 int
3985 target_record_is_replaying (void)
3986 {
3987 struct target_ops *t;
3988
3989 for (t = current_target.beneath; t != NULL; t = t->beneath)
3990 if (t->to_record_is_replaying != NULL)
3991 return t->to_record_is_replaying (t);
3992
3993 return 0;
3994 }
3995
3996 /* See target.h. */
3997
3998 void
3999 target_goto_record_begin (void)
4000 {
4001 struct target_ops *t;
4002
4003 for (t = current_target.beneath; t != NULL; t = t->beneath)
4004 if (t->to_goto_record_begin != NULL)
4005 {
4006 t->to_goto_record_begin (t);
4007 return;
4008 }
4009
4010 tcomplain ();
4011 }
4012
4013 /* See target.h. */
4014
4015 void
4016 target_goto_record_end (void)
4017 {
4018 struct target_ops *t;
4019
4020 for (t = current_target.beneath; t != NULL; t = t->beneath)
4021 if (t->to_goto_record_end != NULL)
4022 {
4023 t->to_goto_record_end (t);
4024 return;
4025 }
4026
4027 tcomplain ();
4028 }
4029
4030 /* See target.h. */
4031
4032 void
4033 target_goto_record (ULONGEST insn)
4034 {
4035 struct target_ops *t;
4036
4037 for (t = current_target.beneath; t != NULL; t = t->beneath)
4038 if (t->to_goto_record != NULL)
4039 {
4040 t->to_goto_record (t, insn);
4041 return;
4042 }
4043
4044 tcomplain ();
4045 }
4046
4047 /* See target.h. */
4048
4049 void
4050 target_insn_history (int size, int flags)
4051 {
4052 struct target_ops *t;
4053
4054 for (t = current_target.beneath; t != NULL; t = t->beneath)
4055 if (t->to_insn_history != NULL)
4056 {
4057 t->to_insn_history (t, size, flags);
4058 return;
4059 }
4060
4061 tcomplain ();
4062 }
4063
4064 /* See target.h. */
4065
4066 void
4067 target_insn_history_from (ULONGEST from, int size, int flags)
4068 {
4069 struct target_ops *t;
4070
4071 for (t = current_target.beneath; t != NULL; t = t->beneath)
4072 if (t->to_insn_history_from != NULL)
4073 {
4074 t->to_insn_history_from (t, from, size, flags);
4075 return;
4076 }
4077
4078 tcomplain ();
4079 }
4080
4081 /* See target.h. */
4082
4083 void
4084 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4085 {
4086 struct target_ops *t;
4087
4088 for (t = current_target.beneath; t != NULL; t = t->beneath)
4089 if (t->to_insn_history_range != NULL)
4090 {
4091 t->to_insn_history_range (t, begin, end, flags);
4092 return;
4093 }
4094
4095 tcomplain ();
4096 }
4097
4098 /* See target.h. */
4099
4100 void
4101 target_call_history (int size, int flags)
4102 {
4103 struct target_ops *t;
4104
4105 for (t = current_target.beneath; t != NULL; t = t->beneath)
4106 if (t->to_call_history != NULL)
4107 {
4108 t->to_call_history (t, size, flags);
4109 return;
4110 }
4111
4112 tcomplain ();
4113 }
4114
4115 /* See target.h. */
4116
4117 void
4118 target_call_history_from (ULONGEST begin, int size, int flags)
4119 {
4120 struct target_ops *t;
4121
4122 for (t = current_target.beneath; t != NULL; t = t->beneath)
4123 if (t->to_call_history_from != NULL)
4124 {
4125 t->to_call_history_from (t, begin, size, flags);
4126 return;
4127 }
4128
4129 tcomplain ();
4130 }
4131
4132 /* See target.h. */
4133
4134 void
4135 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4136 {
4137 struct target_ops *t;
4138
4139 for (t = current_target.beneath; t != NULL; t = t->beneath)
4140 if (t->to_call_history_range != NULL)
4141 {
4142 t->to_call_history_range (t, begin, end, flags);
4143 return;
4144 }
4145
4146 tcomplain ();
4147 }
4148
/* Debug wrapper for to_prepare_to_store: forward to the real method
   saved in debug_target, then log the call to gdb_stdlog.  */
static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
4156
4157 /* See target.h. */
4158
4159 const struct frame_unwind *
4160 target_get_unwinder (void)
4161 {
4162 struct target_ops *t;
4163
4164 for (t = current_target.beneath; t != NULL; t = t->beneath)
4165 if (t->to_get_unwinder != NULL)
4166 return t->to_get_unwinder;
4167
4168 return NULL;
4169 }
4170
4171 /* See target.h. */
4172
4173 const struct frame_unwind *
4174 target_get_tailcall_unwinder (void)
4175 {
4176 struct target_ops *t;
4177
4178 for (t = current_target.beneath; t != NULL; t = t->beneath)
4179 if (t->to_get_tailcall_unwinder != NULL)
4180 return t->to_get_tailcall_unwinder;
4181
4182 return NULL;
4183 }
4184
4185 /* See target.h. */
4186
4187 CORE_ADDR
4188 forward_target_decr_pc_after_break (struct target_ops *ops,
4189 struct gdbarch *gdbarch)
4190 {
4191 for (; ops != NULL; ops = ops->beneath)
4192 if (ops->to_decr_pc_after_break != NULL)
4193 return ops->to_decr_pc_after_break (ops, gdbarch);
4194
4195 return gdbarch_decr_pc_after_break (gdbarch);
4196 }
4197
/* See target.h.  Convenience wrapper starting the search at the top
   of the current target stack.  */

CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
4205
/* Debug wrapper for deprecated_xfer_memory: forward to the real
   method saved in debug_target, then log the request and (on
   success) a hex dump of the transferred bytes to gdb_stdlog.  */
static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the dump into 16-byte lines, keyed on the buffer
	     address; at default verbosity print only the first line.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4246
/* Debug wrapper for to_files_info: forward to the real method, then
   log the call to gdb_stdlog.  */
static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4254
/* Debug wrapper for to_insert_breakpoint: forward to the real method
   saved in debug_target, then log address and result.  */
static int
debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4269
/* Debug wrapper for to_remove_breakpoint: forward to the real method
   saved in debug_target, then log address and result.  */
static int
debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4284
/* Debug wrapper for to_can_use_hw_breakpoint: forward to the real
   method saved in debug_target, then log arguments and result.  */
static int
debug_to_can_use_hw_breakpoint (struct target_ops *self,
				int type, int cnt, int from_tty)
{
  int retval;

  retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
						  type, cnt, from_tty);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
		      (unsigned long) type,
		      (unsigned long) cnt,
		      (unsigned long) from_tty,
		      (unsigned long) retval);
  return retval;
}
4302
4303 static int
4304 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4305 CORE_ADDR addr, int len)
4306 {
4307 CORE_ADDR retval;
4308
4309 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4310 addr, len);
4311
4312 fprintf_unfiltered (gdb_stdlog,
4313 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4314 core_addr_to_string (addr), (unsigned long) len,
4315 core_addr_to_string (retval));
4316 return retval;
4317 }
4318
/* Debug wrapper for to_can_accel_watchpoint_condition: forward to
   the real method saved in debug_target, then log arguments and
   result.  */
static int
debug_to_can_accel_watchpoint_condition (struct target_ops *self,
					 CORE_ADDR addr, int len, int rw,
					 struct expression *cond)
{
  int retval;

  retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
							   addr, len,
							   rw, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_accel_watchpoint_condition "
		      "(%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, rw,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4337
/* Debug wrapper for to_stopped_by_watchpoint: forward to the real
   method saved in debug_target, then log the result.  */
static int
debug_to_stopped_by_watchpoint (struct target_ops *ops)
{
  int retval;

  retval = debug_target.to_stopped_by_watchpoint (&debug_target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_by_watchpoint () = %ld\n",
		      (unsigned long) retval);
  return retval;
}
4350
/* Debug wrapper for to_stopped_data_address: forward to the real
   method, then log the (possibly updated) *ADDR and the result.  */
static int
debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
{
  int retval;

  retval = debug_target.to_stopped_data_address (target, addr);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_data_address ([%s]) = %ld\n",
		      core_addr_to_string (*addr),
		      (unsigned long)retval);
  return retval;
}
4364
4365 static int
4366 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4367 CORE_ADDR addr,
4368 CORE_ADDR start, int length)
4369 {
4370 int retval;
4371
4372 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4373 start, length);
4374
4375 fprintf_filtered (gdb_stdlog,
4376 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4377 core_addr_to_string (addr), core_addr_to_string (start),
4378 length, retval);
4379 return retval;
4380 }
4381
/* Debug wrapper for to_insert_hw_breakpoint: forward to the real
   method saved in debug_target, then log address and result.  */
static int
debug_to_insert_hw_breakpoint (struct target_ops *self,
			       struct gdbarch *gdbarch,
			       struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_hw_breakpoint (&debug_target,
						 gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4398
/* Debug wrapper for to_remove_hw_breakpoint: forward to the real
   method saved in debug_target, then log address and result.  */
static int
debug_to_remove_hw_breakpoint (struct target_ops *self,
			       struct gdbarch *gdbarch,
			       struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_hw_breakpoint (&debug_target,
						 gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4415
/* Debug wrapper for to_insert_watchpoint: forward to the real method
   saved in debug_target, then log arguments and result.  */
static int
debug_to_insert_watchpoint (struct target_ops *self,
			    CORE_ADDR addr, int len, int type,
			    struct expression *cond)
{
  int retval;

  retval = debug_target.to_insert_watchpoint (&debug_target,
					      addr, len, type, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, type,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4432
/* Debug wrapper for to_remove_watchpoint: forward to the real method
   saved in debug_target, then log arguments and result.  */
static int
debug_to_remove_watchpoint (struct target_ops *self,
			    CORE_ADDR addr, int len, int type,
			    struct expression *cond)
{
  int retval;

  retval = debug_target.to_remove_watchpoint (&debug_target,
					      addr, len, type, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, type,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4449
/* Debug wrapper for to_terminal_init: forward, then log.  */
static void
debug_to_terminal_init (struct target_ops *self)
{
  debug_target.to_terminal_init (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
4457
/* Debug wrapper for to_terminal_inferior: forward, then log.  */
static void
debug_to_terminal_inferior (struct target_ops *self)
{
  debug_target.to_terminal_inferior (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
4465
4466 static void
4467 debug_to_terminal_ours_for_output (struct target_ops *self)
4468 {
4469 debug_target.to_terminal_ours_for_output (&debug_target);
4470
4471 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4472 }
4473
4474 static void
4475 debug_to_terminal_ours (struct target_ops *self)
4476 {
4477 debug_target.to_terminal_ours (&debug_target);
4478
4479 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4480 }
4481
4482 static void
4483 debug_to_terminal_save_ours (struct target_ops *self)
4484 {
4485 debug_target.to_terminal_save_ours (&debug_target);
4486
4487 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4488 }
4489
4490 static void
4491 debug_to_terminal_info (struct target_ops *self,
4492 const char *arg, int from_tty)
4493 {
4494 debug_target.to_terminal_info (&debug_target, arg, from_tty);
4495
4496 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4497 from_tty);
4498 }
4499
4500 static void
4501 debug_to_load (struct target_ops *self, char *args, int from_tty)
4502 {
4503 debug_target.to_load (&debug_target, args, from_tty);
4504
4505 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4506 }
4507
4508 static void
4509 debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4510 {
4511 debug_target.to_post_startup_inferior (&debug_target, ptid);
4512
4513 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4514 ptid_get_pid (ptid));
4515 }
4516
4517 static int
4518 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4519 {
4520 int retval;
4521
4522 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4523
4524 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4525 pid, retval);
4526
4527 return retval;
4528 }
4529
4530 static int
4531 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4532 {
4533 int retval;
4534
4535 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4536
4537 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4538 pid, retval);
4539
4540 return retval;
4541 }
4542
4543 static int
4544 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4545 {
4546 int retval;
4547
4548 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4549
4550 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4551 pid, retval);
4552
4553 return retval;
4554 }
4555
4556 static int
4557 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4558 {
4559 int retval;
4560
4561 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4562
4563 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4564 pid, retval);
4565
4566 return retval;
4567 }
4568
4569 static int
4570 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4571 {
4572 int retval;
4573
4574 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4575
4576 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4577 pid, retval);
4578
4579 return retval;
4580 }
4581
4582 static int
4583 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4584 {
4585 int retval;
4586
4587 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4588
4589 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4590 pid, retval);
4591
4592 return retval;
4593 }
4594
4595 static int
4596 debug_to_has_exited (struct target_ops *self,
4597 int pid, int wait_status, int *exit_status)
4598 {
4599 int has_exited;
4600
4601 has_exited = debug_target.to_has_exited (&debug_target,
4602 pid, wait_status, exit_status);
4603
4604 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4605 pid, wait_status, *exit_status, has_exited);
4606
4607 return has_exited;
4608 }
4609
4610 static int
4611 debug_to_can_run (struct target_ops *self)
4612 {
4613 int retval;
4614
4615 retval = debug_target.to_can_run (&debug_target);
4616
4617 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4618
4619 return retval;
4620 }
4621
4622 static struct gdbarch *
4623 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4624 {
4625 struct gdbarch *retval;
4626
4627 retval = debug_target.to_thread_architecture (ops, ptid);
4628
4629 fprintf_unfiltered (gdb_stdlog,
4630 "target_thread_architecture (%s) = %s [%s]\n",
4631 target_pid_to_str (ptid),
4632 host_address_to_string (retval),
4633 gdbarch_bfd_arch_info (retval)->printable_name);
4634 return retval;
4635 }
4636
4637 static void
4638 debug_to_stop (struct target_ops *self, ptid_t ptid)
4639 {
4640 debug_target.to_stop (&debug_target, ptid);
4641
4642 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4643 target_pid_to_str (ptid));
4644 }
4645
4646 static void
4647 debug_to_rcmd (struct target_ops *self, char *command,
4648 struct ui_file *outbuf)
4649 {
4650 debug_target.to_rcmd (&debug_target, command, outbuf);
4651 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4652 }
4653
4654 static char *
4655 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4656 {
4657 char *exec_file;
4658
4659 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4660
4661 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4662 pid, exec_file);
4663
4664 return exec_file;
4665 }
4666
/* Install the debug_to_* logging wrappers into CURRENT_TARGET.

   The unadorned target vector is first copied into DEBUG_TARGET so
   that each wrapper can forward to the real method; then the method
   slots in CURRENT_TARGET are pointed at their logging counterparts.
   Only methods that have a debug_to_* wrapper in this file are
   replaced.  */

static void
setup_target_debug (void)
{
  /* Save the real methods; the wrappers below call through this copy.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4714 \f
4715
/* Help text shared by the "info target" and "info files" commands
   registered in initialize_targets below.  */
static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4720
/* Default implementation of the to_rcmd target method: the "monitor"
   command is only meaningful for targets that supply their own
   to_rcmd, so simply raise an error.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4726
/* Implement the "monitor" command: pass CMD to the current target's
   to_rcmd method, directing any output to gdb_stdtarg.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
4733
4734 /* Print the name of each layers of our target stack. */
4735
4736 static void
4737 maintenance_print_target_stack (char *cmd, int from_tty)
4738 {
4739 struct target_ops *t;
4740
4741 printf_filtered (_("The current target stack is:\n"));
4742
4743 for (t = target_stack; t != NULL; t = t->beneath)
4744 {
4745 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4746 }
4747 }
4748
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated; see
   set_target_async_command below.  */
static int target_async_permitted_1 = 0;
4755
/* The "set target-async" command callback.  The setting may not be
   changed while the inferior is running: in that case revert the
   staging variable and error out; otherwise commit the user's
   choice.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the user's edit of the staging variable so that "show"
	 continues to reflect the value actually in effect.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
4768
/* The "show target-async" command callback.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4778
/* Temporary copies of permission settings.  The "set" commands write
   here first; the real may_* flags are only updated once the change
   is known to be permitted (see set_target_permissions).  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4787
/* Make the user-set values match the real values again.  Called when
   a permission change is rejected, so "show" output stays consistent
   with the values actually in effect.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4800
/* The one function handles (most of) the permission flags in the same
   way.  may_write_memory is the exception: it is committed by
   set_write_memory_permission below, independently of this path.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the staged values so "show" matches reality, then
	 refuse the change.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
4822
/* Set memory write permission independently of observer mode.  Unlike
   set_target_permissions, this does not refuse the change while the
   inferior is running.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
4833
4834
/* Module initialization: push the dummy target as the bottom of the
   target stack and register the target-related commands and
   set/show variables.  */

void
initialize_targets (void)
{
  /* The dummy target is always the bottom-most stratum.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are aliases of one another.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose.  Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The may-* permission settings below all stage their value in a
     *_1 variable and commit via set_target_permissions (or
     set_write_memory_permission for memory writes).  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}