convert to_use_agent
[binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
59
60 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
61 long lwp, long tid);
62
63 static void tcomplain (void) ATTRIBUTE_NORETURN;
64
65 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
66
67 static int return_zero (void);
68
69 void target_ignore (void);
70
71 static void target_command (char *, int);
72
73 static struct target_ops *find_default_run_target (char *);
74
75 static target_xfer_partial_ftype default_xfer_partial;
76
77 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
78 ptid_t ptid);
79
80 static int dummy_find_memory_regions (struct target_ops *self,
81 find_memory_region_ftype ignore1,
82 void *ignore2);
83
84 static char *dummy_make_corefile_notes (struct target_ops *self,
85 bfd *ignore1, int *ignore2);
86
87 static int find_default_can_async_p (struct target_ops *ignore);
88
89 static int find_default_is_async_p (struct target_ops *ignore);
90
91 static enum exec_direction_kind default_execution_direction
92 (struct target_ops *self);
93
94 #include "target-delegates.c"
95
96 static void init_dummy_target (void);
97
98 static struct target_ops debug_target;
99
100 static void debug_to_open (char *, int);
101
102 static void debug_to_prepare_to_store (struct target_ops *self,
103 struct regcache *);
104
105 static void debug_to_files_info (struct target_ops *);
106
107 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
108 struct bp_target_info *);
109
110 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
111 struct bp_target_info *);
112
113 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
114 int, int, int);
115
116 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
117 struct gdbarch *,
118 struct bp_target_info *);
119
120 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
121 struct gdbarch *,
122 struct bp_target_info *);
123
124 static int debug_to_insert_watchpoint (struct target_ops *self,
125 CORE_ADDR, int, int,
126 struct expression *);
127
128 static int debug_to_remove_watchpoint (struct target_ops *self,
129 CORE_ADDR, int, int,
130 struct expression *);
131
132 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
133
134 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
135 CORE_ADDR, CORE_ADDR, int);
136
137 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
138 CORE_ADDR, int);
139
140 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
141 CORE_ADDR, int, int,
142 struct expression *);
143
144 static void debug_to_terminal_init (struct target_ops *self);
145
146 static void debug_to_terminal_inferior (struct target_ops *self);
147
148 static void debug_to_terminal_ours_for_output (struct target_ops *self);
149
150 static void debug_to_terminal_save_ours (struct target_ops *self);
151
152 static void debug_to_terminal_ours (struct target_ops *self);
153
154 static void debug_to_load (struct target_ops *self, char *, int);
155
156 static int debug_to_can_run (struct target_ops *self);
157
158 static void debug_to_stop (struct target_ops *self, ptid_t);
159
160 /* Pointer to array of target architecture structures; the size of the
161 array; the current index into the array; the allocated size of the
162 array. */
163 struct target_ops **target_structs;
164 unsigned target_struct_size;
165 unsigned target_struct_allocsize;
166 #define DEFAULT_ALLOCSIZE 10
167
168 /* The initial current target, so that there is always a semi-valid
169 current target. */
170
171 static struct target_ops dummy_target;
172
173 /* Top of target stack. */
174
175 static struct target_ops *target_stack;
176
177 /* The target structure we are currently using to talk to a process
178 or file or whatever "inferior" we have. */
179
180 struct target_ops current_target;
181
182 /* Command list for target. */
183
184 static struct cmd_list_element *targetlist = NULL;
185
186 /* Nonzero if we should trust readonly sections from the
187 executable when reading memory. */
188
189 static int trust_readonly = 0;
190
191 /* Nonzero if we should show true memory content including
192 memory breakpoint inserted by gdb. */
193
194 static int show_memory_breakpoints = 0;
195
196 /* These globals control whether GDB attempts to perform these
197 operations; they are useful for targets that need to prevent
198 inadvertant disruption, such as in non-stop mode. */
199
200 int may_write_registers = 1;
201
202 int may_write_memory = 1;
203
204 int may_insert_breakpoints = 1;
205
206 int may_insert_tracepoints = 1;
207
208 int may_insert_fast_tracepoints = 1;
209
210 int may_stop = 1;
211
212 /* Non-zero if we want to see trace of target level stuff. */
213
214 static unsigned int targetdebug = 0;
/* "show debug target" callback: report the current value of the
   "debug target" setting to FILE.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  /* VALUE is the already-formatted setting string.  */
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
221
222 static void setup_target_debug (void);
223
224 /* The user just typed 'target' without the name of a target. */
225
226 static void
227 target_command (char *arg, int from_tty)
228 {
229 fputs_filtered ("Argument required (target name). Try `help target'\n",
230 gdb_stdout);
231 }
232
233 /* Default target_has_* methods for process_stratum targets. */
234
235 int
236 default_child_has_all_memory (struct target_ops *ops)
237 {
238 /* If no inferior selected, then we can't read memory here. */
239 if (ptid_equal (inferior_ptid, null_ptid))
240 return 0;
241
242 return 1;
243 }
244
245 int
246 default_child_has_memory (struct target_ops *ops)
247 {
248 /* If no inferior selected, then we can't read memory here. */
249 if (ptid_equal (inferior_ptid, null_ptid))
250 return 0;
251
252 return 1;
253 }
254
255 int
256 default_child_has_stack (struct target_ops *ops)
257 {
258 /* If no inferior selected, there's no stack. */
259 if (ptid_equal (inferior_ptid, null_ptid))
260 return 0;
261
262 return 1;
263 }
264
265 int
266 default_child_has_registers (struct target_ops *ops)
267 {
268 /* Can't read registers from no inferior. */
269 if (ptid_equal (inferior_ptid, null_ptid))
270 return 0;
271
272 return 1;
273 }
274
275 int
276 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
277 {
278 /* If there's no thread selected, then we can't make it run through
279 hoops. */
280 if (ptid_equal (the_ptid, null_ptid))
281 return 0;
282
283 return 1;
284 }
285
286
287 int
288 target_has_all_memory_1 (void)
289 {
290 struct target_ops *t;
291
292 for (t = current_target.beneath; t != NULL; t = t->beneath)
293 if (t->to_has_all_memory (t))
294 return 1;
295
296 return 0;
297 }
298
299 int
300 target_has_memory_1 (void)
301 {
302 struct target_ops *t;
303
304 for (t = current_target.beneath; t != NULL; t = t->beneath)
305 if (t->to_has_memory (t))
306 return 1;
307
308 return 0;
309 }
310
311 int
312 target_has_stack_1 (void)
313 {
314 struct target_ops *t;
315
316 for (t = current_target.beneath; t != NULL; t = t->beneath)
317 if (t->to_has_stack (t))
318 return 1;
319
320 return 0;
321 }
322
323 int
324 target_has_registers_1 (void)
325 {
326 struct target_ops *t;
327
328 for (t = current_target.beneath; t != NULL; t = t->beneath)
329 if (t->to_has_registers (t))
330 return 1;
331
332 return 0;
333 }
334
335 int
336 target_has_execution_1 (ptid_t the_ptid)
337 {
338 struct target_ops *t;
339
340 for (t = current_target.beneath; t != NULL; t = t->beneath)
341 if (t->to_has_execution (t, the_ptid))
342 return 1;
343
344 return 0;
345 }
346
347 int
348 target_has_execution_current (void)
349 {
350 return target_has_execution_1 (inferior_ptid);
351 }
352
353 /* Complete initialization of T. This ensures that various fields in
354 T are set, if needed by the target implementation. */
355
/* Complete initialization of T.  This ensures that various fields in
   T are set, if needed by the target implementation.  Must run before
   T is pushed or registered, since later code assumes these "must
   have" slots are never NULL.  */

void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* The to_has_* predicates default to "no".  The casts adapt
     return_zero's signature to each slot's exact prototype; the
     calling conventions are compatible here, so this is the
     file's long-standing idiom for a constant-0 default.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill remaining NULL methods with the generated delegators (see
     target-delegates.c, included above).  */
  install_delegators (t);
}
380
381 /* Add possible target architecture T to the list and add a new
382 command 'target T->to_shortname'. Set COMPLETER as the command's
383 completer if not NULL. */
384
385 void
386 add_target_with_completer (struct target_ops *t,
387 completer_ftype *completer)
388 {
389 struct cmd_list_element *c;
390
391 complete_target_initialization (t);
392
393 if (!target_structs)
394 {
395 target_struct_allocsize = DEFAULT_ALLOCSIZE;
396 target_structs = (struct target_ops **) xmalloc
397 (target_struct_allocsize * sizeof (*target_structs));
398 }
399 if (target_struct_size >= target_struct_allocsize)
400 {
401 target_struct_allocsize *= 2;
402 target_structs = (struct target_ops **)
403 xrealloc ((char *) target_structs,
404 target_struct_allocsize * sizeof (*target_structs));
405 }
406 target_structs[target_struct_size++] = t;
407
408 if (targetlist == NULL)
409 add_prefix_cmd ("target", class_run, target_command, _("\
410 Connect to a target machine or process.\n\
411 The first argument is the type or protocol of the target machine.\n\
412 Remaining arguments are interpreted by the target protocol. For more\n\
413 information on the arguments for a particular protocol, type\n\
414 `help target ' followed by the protocol name."),
415 &targetlist, "target ", 0, &cmdlist);
416 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
417 &targetlist);
418 if (completer != NULL)
419 set_cmd_completer (c, completer);
420 }
421
422 /* Add a possible target architecture to the list. */
423
424 void
425 add_target (struct target_ops *t)
426 {
427 add_target_with_completer (t, NULL);
428 }
429
430 /* See target.h. */
431
/* See target.h.  Register ALIAS as a deprecated alias for the
   "target T->to_shortname" command.  */

void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  /* ALT is intentionally not freed: deprecate_cmd keeps the pointer
     as the command's suggested replacement text.  */
  alt = xstrprintf ("target %s", t->to_shortname);
  deprecate_cmd (c, alt);
}
444
445 /* Stub functions */
446
/* Stub: a do-nothing default used to fill target vector slots whose
   operation is legitimately a no-op.  */

void
target_ignore (void)
{
}
451
452 void
453 target_kill (void)
454 {
455 struct target_ops *t;
456
457 for (t = current_target.beneath; t != NULL; t = t->beneath)
458 if (t->to_kill != NULL)
459 {
460 if (targetdebug)
461 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
462
463 t->to_kill (t);
464 return;
465 }
466
467 noprocess ();
468 }
469
470 void
471 target_load (char *arg, int from_tty)
472 {
473 target_dcache_invalidate ();
474 (*current_target.to_load) (&current_target, arg, from_tty);
475 }
476
477 void
478 target_create_inferior (char *exec_file, char *args,
479 char **env, int from_tty)
480 {
481 struct target_ops *t;
482
483 for (t = current_target.beneath; t != NULL; t = t->beneath)
484 {
485 if (t->to_create_inferior != NULL)
486 {
487 t->to_create_inferior (t, exec_file, args, env, from_tty);
488 if (targetdebug)
489 fprintf_unfiltered (gdb_stdlog,
490 "target_create_inferior (%s, %s, xxx, %d)\n",
491 exec_file, args, from_tty);
492 return;
493 }
494 }
495
496 internal_error (__FILE__, __LINE__,
497 _("could not find a target to create inferior"));
498 }
499
500 void
501 target_terminal_inferior (void)
502 {
503 /* A background resume (``run&'') should leave GDB in control of the
504 terminal. Use target_can_async_p, not target_is_async_p, since at
505 this point the target is not async yet. However, if sync_execution
506 is not set, we know it will become async prior to resume. */
507 if (target_can_async_p () && !sync_execution)
508 return;
509
510 /* If GDB is resuming the inferior in the foreground, install
511 inferior's terminal modes. */
512 (*current_target.to_terminal_inferior) (&current_target);
513 }
514
515 static int
516 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
517 struct target_ops *t)
518 {
519 errno = EIO; /* Can't read/write this location. */
520 return 0; /* No bytes handled. */
521 }
522
523 static void
524 tcomplain (void)
525 {
526 error (_("You can't do that when your target is `%s'"),
527 current_target.to_shortname);
528 }
529
/* Error out because no process is being debugged.  Does not return.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
535
/* Default to_terminal_info implementation: there is nothing saved to
   report.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
541
542 /* A default implementation for the to_get_ada_task_ptid target method.
543
544 This function builds the PTID by using both LWP and TID as part of
545 the PTID lwp and tid elements. The pid used is the pid of the
546 inferior_ptid. */
547
548 static ptid_t
549 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
550 {
551 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
552 }
553
554 static enum exec_direction_kind
555 default_execution_direction (struct target_ops *self)
556 {
557 if (!target_can_execute_reverse)
558 return EXEC_FORWARD;
559 else if (!target_can_async_p ())
560 return EXEC_FORWARD;
561 else
562 gdb_assert_not_reached ("\
563 to_execution_direction must be implemented for reverse async");
564 }
565
566 /* Go through the target stack from top to bottom, copying over zero
567 entries in current_target, then filling in still empty entries. In
568 effect, we are doing class inheritance through the pushed target
569 vectors.
570
571 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
572 is currently implemented, is that it discards any knowledge of
573 which target an inherited method originally belonged to.
574 Consequently, new new target methods should instead explicitly and
575 locally search the target stack for the target that can handle the
576 request. */
577
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy FIELD from TARGET into current_target, but only if no
     higher-stratum target already supplied it (first writer wins,
     since we walk the stack top-down).  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      /* Do not inherit to_files_info.  */
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      /* Do not inherit to_can_use_hw_breakpoint.  */
      /* Do not inherit to_insert_hw_breakpoint.  */
      /* Do not inherit to_remove_hw_breakpoint.  */
      /* Do not inherit to_ranged_break_num_registers.  */
      /* Do not inherit to_insert_watchpoint.  */
      /* Do not inherit to_remove_watchpoint.  */
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      /* Do not inherit to_watchpoint_addr_within_range.  */
      /* Do not inherit to_region_ok_for_hw_watchpoint.  */
      /* Do not inherit to_can_accel_watchpoint_condition.  */
      /* Do not inherit to_masked_watch_num_registers.  */
      /* Do not inherit to_terminal_init.  */
      /* Do not inherit to_terminal_inferior.  */
      /* Do not inherit to_terminal_ours_for_output.  */
      /* Do not inherit to_terminal_ours.  */
      /* Do not inherit to_terminal_save_ours.  */
      /* Do not inherit to_terminal_info.  */
      /* Do not inherit to_kill.  */
      /* Do not inherit to_load.  */
      /* Do no inherit to_create_inferior.  */
      /* Do not inherit to_post_startup_inferior.  */
      /* Do not inherit to_insert_fork_catchpoint.  */
      /* Do not inherit to_remove_fork_catchpoint.  */
      /* Do not inherit to_insert_vfork_catchpoint.  */
      /* Do not inherit to_remove_vfork_catchpoint.  */
      /* Do not inherit to_follow_fork.  */
      /* Do not inherit to_insert_exec_catchpoint.  */
      /* Do not inherit to_remove_exec_catchpoint.  */
      /* Do not inherit to_set_syscall_catchpoint.  */
      /* Do not inherit to_has_exited.  */
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      /* Do not inherit to_extra_thread_info.  */
      /* Do not inherit to_thread_name.  */
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      /* Do not inherit to_pid_to_exec_file.  */
      /* Do not inherit to_log_command.  */
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      /* Do not inherit to_find_memory_regions.  */
      /* Do not inherit to_make_corefile_notes.  */
      /* Do not inherit to_get_bookmark.  */
      /* Do not inherit to_goto_bookmark.  */
      /* Do not inherit to_get_thread_local_address.  */
      /* Do not inherit to_can_execute_reverse.  */
      /* Do not inherit to_execution_direction.  */
      /* Do not inherit to_thread_architecture.  */
      /* Do not inherit to_read_description.  */
      /* Do not inherit to_get_ada_task_ptid.  */
      /* Do not inherit to_search_memory.  */
      /* Do not inherit to_supports_multi_process.  */
      /* Do not inherit to_supports_enable_disable_tracepoint.  */
      /* Do not inherit to_supports_string_tracing.  */
      /* Do not inherit to_trace_init.  */
      /* Do not inherit to_download_tracepoint.  */
      /* Do not inherit to_can_download_tracepoint.  */
      /* Do not inherit to_download_trace_state_variable.  */
      /* Do not inherit to_enable_tracepoint.  */
      /* Do not inherit to_disable_tracepoint.  */
      /* Do not inherit to_trace_set_readonly_regions.  */
      /* Do not inherit to_trace_start.  */
      /* Do not inherit to_get_trace_status.  */
      /* Do not inherit to_get_tracepoint_status.  */
      /* Do not inherit to_trace_stop.  */
      /* Do not inherit to_trace_find.  */
      /* Do not inherit to_get_trace_state_variable_value.  */
      /* Do not inherit to_save_trace_data.  */
      /* Do not inherit to_upload_tracepoints.  */
      /* Do not inherit to_upload_trace_state_variables.  */
      /* Do not inherit to_get_raw_trace_data.  */
      /* Do not inherit to_get_min_fast_tracepoint_insn_len.  */
      /* Do not inherit to_set_disconnected_tracing.  */
      /* Do not inherit to_set_circular_trace_buffer.  */
      /* Do not inherit to_set_trace_buffer_size.  */
      /* Do not inherit to_set_trace_notes.  */
      /* Do not inherit to_get_tib_address.  */
      /* Do not inherit to_set_permissions.  */
      /* Do not inherit to_static_tracepoint_marker_at.  */
      /* Do not inherit to_static_tracepoint_markers_by_strid.  */
      /* Do not inherit to_traceframe_info.  */
      /* Do not inherit to_use_agent.  */
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
  if (!current_target.field)               \
    current_target.field = value

  /* The casts below adapt each default's signature to the slot's
     exact prototype; see complete_target_initialization for the same
     idiom.  */
  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_stop,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  /* A NULL to_read_description is meaningful (means "no description");
     force it back to NULL in case a stratum inherited one above.  */
  current_target.to_read_description = NULL;
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_can_use_agent,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (struct target_ops *))
	    return_zero);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
774
775 /* Push a new target type into the stack of the existing target accessors,
776 possibly superseding some of the existing accessors.
777
778 Rather than allow an empty stack, we always have the dummy target at
779 the bottom stratum, so we can call the function vectors without
780 checking them. */
781
/* Push target T onto the target stack at its stratum, closing and
   removing any target already installed at that stratum, then rebuild
   current_target.  */

void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  The stack is
     kept sorted by stratum, highest first; CUR is a pointer to the
     link that will point at T.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
         and un-hook it from the stack.  Unhook before closing so the
         target being closed is no longer reachable from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  update_current_target ();
}
825
826 /* Remove a target_ops vector from the stack, wherever it may be.
827 Return how many times it was removed (0 or 1). */
828
/* Remove a target_ops vector from the stack, wherever it may be.
   Return how many times it was removed (0 or 1).  */

int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target is the permanent bottom of the stack and must
     never be removed.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
867
868 void
869 pop_all_targets_above (enum strata above_stratum)
870 {
871 while ((int) (current_target.to_stratum) > (int) above_stratum)
872 {
873 if (!unpush_target (target_stack))
874 {
875 fprintf_unfiltered (gdb_stderr,
876 "pop_all_targets couldn't find target %s\n",
877 target_stack->to_shortname);
878 internal_error (__FILE__, __LINE__,
879 _("failed internal consistency check"));
880 break;
881 }
882 }
883 }
884
885 void
886 pop_all_targets (void)
887 {
888 pop_all_targets_above (dummy_stratum);
889 }
890
891 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
892
893 int
894 target_is_pushed (struct target_ops *t)
895 {
896 struct target_ops **cur;
897
898 /* Check magic number. If wrong, it probably means someone changed
899 the struct definition, but not all the places that initialize one. */
900 if (t->to_magic != OPS_MAGIC)
901 {
902 fprintf_unfiltered (gdb_stderr,
903 "Magic number of %s target struct wrong\n",
904 t->to_shortname);
905 internal_error (__FILE__, __LINE__,
906 _("failed internal consistency check"));
907 }
908
909 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
910 if (*cur == t)
911 return 1;
912
913 return 0;
914 }
915
916 /* Using the objfile specified in OBJFILE, find the address for the
917 current thread's thread-local storage with offset OFFSET. */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile because it is written inside TRY_CATCH and read after;
     TRY_CATCH is implemented with setjmp/longjmp.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target on the stack that can do TLS lookups.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;
	  
	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
	                                                   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Not a TLS-specific error; re-throw to an outer handler.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1012
1013 const char *
1014 target_xfer_status_to_string (enum target_xfer_status err)
1015 {
1016 #define CASE(X) case X: return #X
1017 switch (err)
1018 {
1019 CASE(TARGET_XFER_E_IO);
1020 CASE(TARGET_XFER_E_UNAVAILABLE);
1021 default:
1022 return "<unknown>";
1023 }
1024 #undef CASE
1025 };
1026
1027
1028 #undef MIN
1029 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1030
1031 /* target_read_string -- read a null terminated string, up to LEN bytes,
1032 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1033 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1034 is responsible for freeing it. Return the number of bytes successfully
1035 read. */
1036
int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read in 4-byte-aligned chunks: TLEN is how many of the bytes
	 in the aligned word at MEMADDR & ~3 we actually want, and
	 OFFSET is where they start within that word.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the result buffer (doubling) before appending TLEN
	 bytes, preserving BUFPTR's position within it.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes into the buffer, stopping (and counting) at the
	 first NUL terminator.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
done:
  /* On error paths the partial buffer is still handed to the caller,
     with the error code reported via *ERRNOP.  Caller frees *STRING.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1103
1104 struct target_section_table *
1105 target_get_section_table (struct target_ops *target)
1106 {
1107 struct target_ops *t;
1108
1109 if (targetdebug)
1110 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1111
1112 for (t = target; t != NULL; t = t->beneath)
1113 if (t->to_get_section_table != NULL)
1114 return (*t->to_get_section_table) (t);
1115
1116 return NULL;
1117 }
1118
1119 /* Find a section containing ADDR. */
1120
1121 struct target_section *
1122 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1123 {
1124 struct target_section_table *table = target_get_section_table (target);
1125 struct target_section *secp;
1126
1127 if (table == NULL)
1128 return NULL;
1129
1130 for (secp = table->sections; secp < table->sections_end; secp++)
1131 {
1132 if (addr >= secp->addr && addr < secp->endaddr)
1133 return secp;
1134 }
1135 return NULL;
1136 }
1137
1138 /* Read memory from the live target, even if currently inspecting a
1139 traceframe. The return is the same as that of target_read. */
1140
1141 static enum target_xfer_status
1142 target_read_live_memory (enum target_object object,
1143 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1144 ULONGEST *xfered_len)
1145 {
1146 enum target_xfer_status ret;
1147 struct cleanup *cleanup;
1148
1149 /* Switch momentarily out of tfind mode so to access live memory.
1150 Note that this must not clear global state, such as the frame
1151 cache, which must still remain valid for the previous traceframe.
1152 We may be _building_ the frame cache at this point. */
1153 cleanup = make_cleanup_restore_traceframe_number ();
1154 set_traceframe_number (-1);
1155
1156 ret = target_xfer_partial (current_target.beneath, object, NULL,
1157 myaddr, NULL, memaddr, len, xfered_len);
1158
1159 do_cleanups (cleanup);
1160 return ret;
1161 }
1162
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only attempt the live read if MEMADDR lies in a section whose BFD
     flags mark it read-only; otherwise report end-of-file below.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      /* Scan all sections to find the one(s) covering the request,
	 clipping the read at a section boundary if necessary.  */
      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  /* Not in a read-only section (or no covering section found).  */
  return TARGET_XFER_EOF;
}
1218
1219 /* Read memory from more than one valid target. A core file, for
1220 instance, could have some of memory but delegate other bits to
1221 the target below it. So, we must manually try all targets. */
1222
1223 static enum target_xfer_status
1224 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1225 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1226 ULONGEST *xfered_len)
1227 {
1228 enum target_xfer_status res;
1229
1230 do
1231 {
1232 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1233 readbuf, writebuf, memaddr, len,
1234 xfered_len);
1235 if (res == TARGET_XFER_OK)
1236 break;
1237
1238 /* Stop if the target reports that the memory is not available. */
1239 if (res == TARGET_XFER_E_UNAVAILABLE)
1240 break;
1241
1242 /* We want to continue past core files to executables, but not
1243 past a running target's memory. */
1244 if (ops->to_has_all_memory (ops))
1245 break;
1246
1247 ops = ops->beneath;
1248 }
1249 while (ops != NULL);
1250
1251 return res;
1252 }
1253
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Clip the read to end where the traceframe's
		     available memory begins.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  REG_LEN is the
     request length clipped so it does not run past the region.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Honor the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  NOTE(review): the `!=' comparisons look
     intentional -- the tagged stack/code-memory writes were already
     routed through dcache_xfer_memory above -- but confirm against the
     stack-cache design before changing anything here.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1464
1465 /* Perform a partial memory transfer. For docs see target.h,
1466 to_xfer_partial. */
1467
1468 static enum target_xfer_status
1469 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1470 gdb_byte *readbuf, const gdb_byte *writebuf,
1471 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1472 {
1473 enum target_xfer_status res;
1474
1475 /* Zero length requests are ok and require no work. */
1476 if (len == 0)
1477 return TARGET_XFER_EOF;
1478
1479 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1480 breakpoint insns, thus hiding out from higher layers whether
1481 there are software breakpoints inserted in the code stream. */
1482 if (readbuf != NULL)
1483 {
1484 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1485 xfered_len);
1486
1487 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1488 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1489 }
1490 else
1491 {
1492 void *buf;
1493 struct cleanup *old_chain;
1494
1495 /* A large write request is likely to be partially satisfied
1496 by memory_xfer_partial_1. We will continually malloc
1497 and free a copy of the entire write request for breakpoint
1498 shadow handling even though we only end up writing a small
1499 subset of it. Cap writes to 4KB to mitigate this. */
1500 len = min (4096, len);
1501
1502 buf = xmalloc (len);
1503 old_chain = make_cleanup (xfree, buf);
1504 memcpy (buf, writebuf, len);
1505
1506 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1507 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1508 xfered_len);
1509
1510 do_cleanups (old_chain);
1511 }
1512
1513 return res;
1514 }
1515
1516 static void
1517 restore_show_memory_breakpoints (void *arg)
1518 {
1519 show_memory_breakpoints = (uintptr_t) arg;
1520 }
1521
1522 struct cleanup *
1523 make_show_memory_breakpoints_cleanup (int show)
1524 {
1525 int current = show_memory_breakpoints;
1526
1527 show_memory_breakpoints = show;
1528 return make_cleanup (restore_show_memory_breakpoints,
1529 (void *) (uintptr_t) current);
1530 }
1531
/* For docs see target.h, to_xfer_partial.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Refuse writes when the "may-write-memory" setting is off.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  /* With "set debug target" on, trace the call and, at level >= 2,
     hex-dump the bytes that were transferred.  */
  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  /* NOTE(review): I is an int while *XFERED_LEN is ULONGEST;
	     fine for debug dumps of realistic sizes, but worth
	     confirming nothing ever transfers > INT_MAX here.  */
	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1624
1625 /* Read LEN bytes of target memory at address MEMADDR, placing the
1626 results in GDB's memory at MYADDR. Returns either 0 for success or
1627 TARGET_XFER_E_IO if any error occurs.
1628
1629 If an error occurs, no guarantee is made about the contents of the data at
1630 MYADDR. In particular, the caller should not depend upon partial reads
1631 filling the buffer with good data. There is no way for the caller to know
1632 how much good data might have been transfered anyway. Callers that can
1633 deal with partial reads should call target_read (which will retry until
1634 it makes no progress, and then return how much was transferred). */
1635
1636 int
1637 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1638 {
1639 /* Dispatch to the topmost target, not the flattened current_target.
1640 Memory accesses check target->to_has_(all_)memory, and the
1641 flattened target doesn't inherit those. */
1642 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1643 myaddr, memaddr, len) == len)
1644 return 0;
1645 else
1646 return TARGET_XFER_E_IO;
1647 }
1648
1649 /* Like target_read_memory, but specify explicitly that this is a read
1650 from the target's raw memory. That is, this read bypasses the
1651 dcache, breakpoint shadowing, etc. */
1652
1653 int
1654 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1655 {
1656 /* See comment in target_read_memory about why the request starts at
1657 current_target.beneath. */
1658 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1659 myaddr, memaddr, len) == len)
1660 return 0;
1661 else
1662 return TARGET_XFER_E_IO;
1663 }
1664
1665 /* Like target_read_memory, but specify explicitly that this is a read from
1666 the target's stack. This may trigger different cache behavior. */
1667
1668 int
1669 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1670 {
1671 /* See comment in target_read_memory about why the request starts at
1672 current_target.beneath. */
1673 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1674 myaddr, memaddr, len) == len)
1675 return 0;
1676 else
1677 return TARGET_XFER_E_IO;
1678 }
1679
1680 /* Like target_read_memory, but specify explicitly that this is a read from
1681 the target's code. This may trigger different cache behavior. */
1682
1683 int
1684 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1685 {
1686 /* See comment in target_read_memory about why the request starts at
1687 current_target.beneath. */
1688 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1689 myaddr, memaddr, len) == len)
1690 return 0;
1691 else
1692 return TARGET_XFER_E_IO;
1693 }
1694
1695 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1696 Returns either 0 for success or TARGET_XFER_E_IO if any
1697 error occurs. If an error occurs, no guarantee is made about how
1698 much data got written. Callers that can deal with partial writes
1699 should call target_write. */
1700
1701 int
1702 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1703 {
1704 /* See comment in target_read_memory about why the request starts at
1705 current_target.beneath. */
1706 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1707 myaddr, memaddr, len) == len)
1708 return 0;
1709 else
1710 return TARGET_XFER_E_IO;
1711 }
1712
1713 /* Write LEN bytes from MYADDR to target raw memory at address
1714 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1715 if any error occurs. If an error occurs, no guarantee is made
1716 about how much data got written. Callers that can deal with
1717 partial writes should call target_write. */
1718
1719 int
1720 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1721 {
1722 /* See comment in target_read_memory about why the request starts at
1723 current_target.beneath. */
1724 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1725 myaddr, memaddr, len) == len)
1726 return 0;
1727 else
1728 return TARGET_XFER_E_IO;
1729 }
1730
1731 /* Fetch the target's memory map. */
1732
1733 VEC(mem_region_s) *
1734 target_memory_map (void)
1735 {
1736 VEC(mem_region_s) *result;
1737 struct mem_region *last_one, *this_one;
1738 int ix;
1739 struct target_ops *t;
1740
1741 if (targetdebug)
1742 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1743
1744 for (t = current_target.beneath; t != NULL; t = t->beneath)
1745 if (t->to_memory_map != NULL)
1746 break;
1747
1748 if (t == NULL)
1749 return NULL;
1750
1751 result = t->to_memory_map (t);
1752 if (result == NULL)
1753 return NULL;
1754
1755 qsort (VEC_address (mem_region_s, result),
1756 VEC_length (mem_region_s, result),
1757 sizeof (struct mem_region), mem_region_cmp);
1758
1759 /* Check that regions do not overlap. Simultaneously assign
1760 a numbering for the "mem" commands to use to refer to
1761 each region. */
1762 last_one = NULL;
1763 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1764 {
1765 this_one->number = ix;
1766
1767 if (last_one && last_one->hi > this_one->lo)
1768 {
1769 warning (_("Overlapping regions in memory map: ignoring"));
1770 VEC_free (mem_region_s, result);
1771 return NULL;
1772 }
1773 last_one = this_one;
1774 }
1775
1776 return result;
1777 }
1778
1779 void
1780 target_flash_erase (ULONGEST address, LONGEST length)
1781 {
1782 struct target_ops *t;
1783
1784 for (t = current_target.beneath; t != NULL; t = t->beneath)
1785 if (t->to_flash_erase != NULL)
1786 {
1787 if (targetdebug)
1788 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1789 hex_string (address), phex (length, 0));
1790 t->to_flash_erase (t, address, length);
1791 return;
1792 }
1793
1794 tcomplain ();
1795 }
1796
1797 void
1798 target_flash_done (void)
1799 {
1800 struct target_ops *t;
1801
1802 for (t = current_target.beneath; t != NULL; t = t->beneath)
1803 if (t->to_flash_done != NULL)
1804 {
1805 if (targetdebug)
1806 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1807 t->to_flash_done (t);
1808 return;
1809 }
1810
1811 tcomplain ();
1812 }
1813
/* "show trust-readonly-sections" callback: print the current VALUE of
   the setting to FILE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1822
1823 /* More generic transfers. */
1824
1825 static enum target_xfer_status
1826 default_xfer_partial (struct target_ops *ops, enum target_object object,
1827 const char *annex, gdb_byte *readbuf,
1828 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1829 ULONGEST *xfered_len)
1830 {
1831 if (object == TARGET_OBJECT_MEMORY
1832 && ops->deprecated_xfer_memory != NULL)
1833 /* If available, fall back to the target's
1834 "deprecated_xfer_memory" method. */
1835 {
1836 int xfered = -1;
1837
1838 errno = 0;
1839 if (writebuf != NULL)
1840 {
1841 void *buffer = xmalloc (len);
1842 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1843
1844 memcpy (buffer, writebuf, len);
1845 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1846 1/*write*/, NULL, ops);
1847 do_cleanups (cleanup);
1848 }
1849 if (readbuf != NULL)
1850 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1851 0/*read*/, NULL, ops);
1852 if (xfered > 0)
1853 {
1854 *xfered_len = (ULONGEST) xfered;
1855 return TARGET_XFER_E_IO;
1856 }
1857 else if (xfered == 0 && errno == 0)
1858 /* "deprecated_xfer_memory" uses 0, cross checked against
1859 ERRNO as one indication of an error. */
1860 return TARGET_XFER_EOF;
1861 else
1862 return TARGET_XFER_E_IO;
1863 }
1864 else
1865 {
1866 gdb_assert (ops->beneath != NULL);
1867 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1868 readbuf, writebuf, offset, len,
1869 xfered_len);
1870 }
1871 }
1872
1873 /* Target vector read/write partial wrapper functions. */
1874
1875 static enum target_xfer_status
1876 target_read_partial (struct target_ops *ops,
1877 enum target_object object,
1878 const char *annex, gdb_byte *buf,
1879 ULONGEST offset, ULONGEST len,
1880 ULONGEST *xfered_len)
1881 {
1882 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1883 xfered_len);
1884 }
1885
1886 static enum target_xfer_status
1887 target_write_partial (struct target_ops *ops,
1888 enum target_object object,
1889 const char *annex, const gdb_byte *buf,
1890 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1891 {
1892 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1893 xfered_len);
1894 }
1895
1896 /* Wrappers to perform the full transfer. */
1897
1898 /* For docs on target_read see target.h. */
1899
1900 LONGEST
1901 target_read (struct target_ops *ops,
1902 enum target_object object,
1903 const char *annex, gdb_byte *buf,
1904 ULONGEST offset, LONGEST len)
1905 {
1906 LONGEST xfered = 0;
1907
1908 while (xfered < len)
1909 {
1910 ULONGEST xfered_len;
1911 enum target_xfer_status status;
1912
1913 status = target_read_partial (ops, object, annex,
1914 (gdb_byte *) buf + xfered,
1915 offset + xfered, len - xfered,
1916 &xfered_len);
1917
1918 /* Call an observer, notifying them of the xfer progress? */
1919 if (status == TARGET_XFER_EOF)
1920 return xfered;
1921 else if (status == TARGET_XFER_OK)
1922 {
1923 xfered += xfered_len;
1924 QUIT;
1925 }
1926 else
1927 return -1;
1928
1929 }
1930 return len;
1931 }
1932
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning nor at the end -- nothing is
   returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  Explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is the half adjacent to the end we already know
	 is readable; bisect from that direction.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the next
	     iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy it out of
	 BUF into a right-sized block, since the readable part is at
	 the tail of the scratch buffer.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2067
2068 void
2069 free_memory_read_result_vector (void *x)
2070 {
2071 VEC(memory_read_result_s) *v = x;
2072 memory_read_result_s *current;
2073 int ix;
2074
2075 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2076 {
2077 xfree (current->data);
2078 }
2079 VEC_free (memory_read_result_s, v);
2080 }
2081
/* Read LEN bytes of target memory starting at OFFSET, tolerating
   unreadable holes.  Returns a vector of memory_read_result, one
   element per contiguous chunk that could actually be read; each
   element owns its data buffer (free with
   free_memory_read_result_vector).  Ranges that fall inside regions
   marked MEM_NONE/MEM_WO are skipped silently.  */

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      /* Look up the memory region covering the next unread byte.  */
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      if (region->hi == 0)
	/* Region extends to the end of the address space.  */
	rlen = len - xfered;
      else
	/* NOTE(review): this looks like it should be
	   region->hi - (offset + xfered), i.e. measured from the
	   current position rather than the original OFFSET — confirm
	   against lookup_mem_region's contract before changing.  */
	rlen = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered += rlen;
	}
      else
	{
	  LONGEST to_read = min (len - xfered, rlen);
	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
				      (gdb_byte *) buffer,
				      offset + xfered, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfer <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  BUFFER is unused in that case; the
		 subrange reader allocates its own.  */
	      xfree (buffer);
	      read_whatever_is_readable (ops, offset + xfered,
					 offset + xfered + to_read, &result);
	      xfered += to_read;
	    }
	  else
	    {
	      /* Full or partial read succeeded; record the chunk.
		 BUFFER's ownership moves into the vector element.  */
	      struct memory_read_result r;
	      r.data = buffer;
	      r.begin = offset + xfered;
	      r.end = r.begin + xfer;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered += xfer;
	    }
	  QUIT;
	}
    }
  return result;
}
2140
2141
2142 /* An alternative to target_write with progress callbacks. */
2143
2144 LONGEST
2145 target_write_with_progress (struct target_ops *ops,
2146 enum target_object object,
2147 const char *annex, const gdb_byte *buf,
2148 ULONGEST offset, LONGEST len,
2149 void (*progress) (ULONGEST, void *), void *baton)
2150 {
2151 LONGEST xfered = 0;
2152
2153 /* Give the progress callback a chance to set up. */
2154 if (progress)
2155 (*progress) (0, baton);
2156
2157 while (xfered < len)
2158 {
2159 ULONGEST xfered_len;
2160 enum target_xfer_status status;
2161
2162 status = target_write_partial (ops, object, annex,
2163 (gdb_byte *) buf + xfered,
2164 offset + xfered, len - xfered,
2165 &xfered_len);
2166
2167 if (status == TARGET_XFER_EOF)
2168 return xfered;
2169 if (TARGET_XFER_STATUS_ERROR_P (status))
2170 return -1;
2171
2172 gdb_assert (status == TARGET_XFER_OK);
2173 if (progress)
2174 (*progress) (xfered_len, baton);
2175
2176 xfered += xfered_len;
2177 QUIT;
2178 }
2179 return len;
2180 }
2181
2182 /* For docs on target_write see target.h. */
2183
2184 LONGEST
2185 target_write (struct target_ops *ops,
2186 enum target_object object,
2187 const char *annex, const gdb_byte *buf,
2188 ULONGEST offset, LONGEST len)
2189 {
2190 return target_write_with_progress (ops, object, annex, buf, offset, len,
2191 NULL, NULL);
2192 }
2193
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_read_alloc; see the declaration of that function for more
   information.

   On success, ownership of the buffer passes to the caller via *BUF_P
   (unless zero bytes were read, in which case *BUF_P is left unset).
   On error the buffer is freed and TARGET_XFER_E_IO is returned.  */

static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  /* buf_alloc is the current capacity, buf_pos the fill level.  */
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      /* Request as much as fits, keeping PADDING bytes in reserve for
	 the caller (e.g. target_read_stralloc's terminating NUL).  */
      status = target_read_partial (ops, object, annex, &buf[buf_pos],
				    buf_pos, buf_alloc - buf_pos - padding,
				    &xfered_len);

      if (status == TARGET_XFER_EOF)
	{
	  /* Read all there was.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}
      else if (status != TARGET_XFER_OK)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return TARGET_XFER_E_IO;
	}

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2256
2257 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2258 the size of the transferred data. See the declaration in "target.h"
2259 function for more information about the return value. */
2260
2261 LONGEST
2262 target_read_alloc (struct target_ops *ops, enum target_object object,
2263 const char *annex, gdb_byte **buf_p)
2264 {
2265 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2266 }
2267
2268 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2269 returned as a string, allocated using xmalloc. If an error occurs
2270 or the transfer is unsupported, NULL is returned. Empty objects
2271 are returned as allocated but empty strings. A warning is issued
2272 if the result contains any embedded NUL bytes. */
2273
2274 char *
2275 target_read_stralloc (struct target_ops *ops, enum target_object object,
2276 const char *annex)
2277 {
2278 gdb_byte *buffer;
2279 char *bufstr;
2280 LONGEST i, transferred;
2281
2282 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2283 bufstr = (char *) buffer;
2284
2285 if (transferred < 0)
2286 return NULL;
2287
2288 if (transferred == 0)
2289 return xstrdup ("");
2290
2291 bufstr[transferred] = 0;
2292
2293 /* Check for embedded NUL bytes; but allow trailing NULs. */
2294 for (i = strlen (bufstr); i < transferred; i++)
2295 if (bufstr[i] != 0)
2296 {
2297 warning (_("target object %d, annex %s, "
2298 "contained unexpected null characters"),
2299 (int) object, annex ? annex : "(none)");
2300 break;
2301 }
2302
2303 return bufstr;
2304 }
2305
2306 /* Memory transfer methods. */
2307
2308 void
2309 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2310 LONGEST len)
2311 {
2312 /* This method is used to read from an alternate, non-current
2313 target. This read must bypass the overlay support (as symbols
2314 don't match this target), and GDB's internal cache (wrong cache
2315 for this target). */
2316 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2317 != len)
2318 memory_error (TARGET_XFER_E_IO, addr);
2319 }
2320
2321 ULONGEST
2322 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2323 int len, enum bfd_endian byte_order)
2324 {
2325 gdb_byte buf[sizeof (ULONGEST)];
2326
2327 gdb_assert (len <= sizeof (buf));
2328 get_target_memory (ops, addr, buf, len);
2329 return extract_unsigned_integer (buf, len, byte_order);
2330 }
2331
2332 /* See target.h. */
2333
2334 int
2335 target_insert_breakpoint (struct gdbarch *gdbarch,
2336 struct bp_target_info *bp_tgt)
2337 {
2338 if (!may_insert_breakpoints)
2339 {
2340 warning (_("May not insert breakpoints"));
2341 return 1;
2342 }
2343
2344 return current_target.to_insert_breakpoint (&current_target,
2345 gdbarch, bp_tgt);
2346 }
2347
2348 /* See target.h. */
2349
2350 int
2351 target_remove_breakpoint (struct gdbarch *gdbarch,
2352 struct bp_target_info *bp_tgt)
2353 {
2354 /* This is kind of a weird case to handle, but the permission might
2355 have been changed after breakpoints were inserted - in which case
2356 we should just take the user literally and assume that any
2357 breakpoints should be left in place. */
2358 if (!may_insert_breakpoints)
2359 {
2360 warning (_("May not remove breakpoints"));
2361 return 1;
2362 }
2363
2364 return current_target.to_remove_breakpoint (&current_target,
2365 gdbarch, bp_tgt);
2366 }
2367
2368 static void
2369 target_info (char *args, int from_tty)
2370 {
2371 struct target_ops *t;
2372 int has_all_mem = 0;
2373
2374 if (symfile_objfile != NULL)
2375 printf_unfiltered (_("Symbols from \"%s\".\n"),
2376 objfile_name (symfile_objfile));
2377
2378 for (t = target_stack; t != NULL; t = t->beneath)
2379 {
2380 if (!(*t->to_has_memory) (t))
2381 continue;
2382
2383 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2384 continue;
2385 if (has_all_mem)
2386 printf_unfiltered (_("\tWhile running this, "
2387 "GDB does not access memory from...\n"));
2388 printf_unfiltered ("%s:\n", t->to_longname);
2389 (t->to_files_info) (t);
2390 has_all_mem = (*t->to_has_all_memory) (t);
2391 }
2392 }
2393
2394 /* This function is called before any new inferior is created, e.g.
2395 by running a program, attaching, or connecting to a target.
2396 It cleans up any state from previous invocations which might
2397 change between runs. This is a subset of what target_preopen
2398 resets (things which might change between targets). */
2399
2400 void
2401 target_pre_inferior (int from_tty)
2402 {
2403 /* Clear out solib state. Otherwise the solib state of the previous
2404 inferior might have survived and is entirely wrong for the new
2405 target. This has been observed on GNU/Linux using glibc 2.3. How
2406 to reproduce:
2407
2408 bash$ ./foo&
2409 [1] 4711
2410 bash$ ./foo&
2411 [1] 4712
2412 bash$ gdb ./foo
2413 [...]
2414 (gdb) attach 4711
2415 (gdb) detach
2416 (gdb) attach 4712
2417 Cannot access memory at address 0xdeadbeef
2418 */
2419
2420 /* In some OSs, the shared library list is the same/global/shared
2421 across inferiors. If code is shared between processes, so are
2422 memory regions and features. */
2423 if (!gdbarch_has_global_solist (target_gdbarch ()))
2424 {
2425 no_shared_libraries (NULL, from_tty);
2426
2427 invalidate_target_mem_regions ();
2428
2429 target_clear_description ();
2430 }
2431
2432 agent_capability_invalidate ();
2433 }
2434
2435 /* Callback for iterate_over_inferiors. Gets rid of the given
2436 inferior. */
2437
2438 static int
2439 dispose_inferior (struct inferior *inf, void *args)
2440 {
2441 struct thread_info *thread;
2442
2443 thread = any_thread_of_process (inf->pid);
2444 if (thread)
2445 {
2446 switch_to_thread (thread->ptid);
2447
2448 /* Core inferiors actually should be detached, not killed. */
2449 if (target_has_execution)
2450 target_kill ();
2451 else
2452 target_detach (NULL, 0);
2453 }
2454
2455 return 0;
2456 }
2457
2458 /* This is to be called by the open routine before it does
2459 anything. */
2460
2461 void
2462 target_preopen (int from_tty)
2463 {
2464 dont_repeat ();
2465
2466 if (have_inferiors ())
2467 {
2468 if (!from_tty
2469 || !have_live_inferiors ()
2470 || query (_("A program is being debugged already. Kill it? ")))
2471 iterate_over_inferiors (dispose_inferior, NULL);
2472 else
2473 error (_("Program not killed."));
2474 }
2475
2476 /* Calling target_kill may remove the target from the stack. But if
2477 it doesn't (which seems like a win for UDI), remove it now. */
2478 /* Leave the exec target, though. The user may be switching from a
2479 live process to a core of the same program. */
2480 pop_all_targets_above (file_stratum);
2481
2482 target_pre_inferior (from_tty);
2483 }
2484
2485 /* Detach a target after doing deferred register stores. */
2486
2487 void
2488 target_detach (const char *args, int from_tty)
2489 {
2490 struct target_ops* t;
2491
2492 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2493 /* Don't remove global breakpoints here. They're removed on
2494 disconnection from the target. */
2495 ;
2496 else
2497 /* If we're in breakpoints-always-inserted mode, have to remove
2498 them before detaching. */
2499 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2500
2501 prepare_for_detach ();
2502
2503 current_target.to_detach (&current_target, args, from_tty);
2504 if (targetdebug)
2505 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2506 args, from_tty);
2507 }
2508
2509 void
2510 target_disconnect (char *args, int from_tty)
2511 {
2512 struct target_ops *t;
2513
2514 /* If we're in breakpoints-always-inserted mode or if breakpoints
2515 are global across processes, we have to remove them before
2516 disconnecting. */
2517 remove_breakpoints ();
2518
2519 for (t = current_target.beneath; t != NULL; t = t->beneath)
2520 if (t->to_disconnect != NULL)
2521 {
2522 if (targetdebug)
2523 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2524 args, from_tty);
2525 t->to_disconnect (t, args, from_tty);
2526 return;
2527 }
2528
2529 tcomplain ();
2530 }
2531
2532 ptid_t
2533 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2534 {
2535 struct target_ops *t;
2536 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2537 status, options);
2538
2539 if (targetdebug)
2540 {
2541 char *status_string;
2542 char *options_string;
2543
2544 status_string = target_waitstatus_to_string (status);
2545 options_string = target_options_to_string (options);
2546 fprintf_unfiltered (gdb_stdlog,
2547 "target_wait (%d, status, options={%s})"
2548 " = %d, %s\n",
2549 ptid_get_pid (ptid), options_string,
2550 ptid_get_pid (retval), status_string);
2551 xfree (status_string);
2552 xfree (options_string);
2553 }
2554
2555 return retval;
2556 }
2557
2558 char *
2559 target_pid_to_str (ptid_t ptid)
2560 {
2561 struct target_ops *t;
2562
2563 for (t = current_target.beneath; t != NULL; t = t->beneath)
2564 {
2565 if (t->to_pid_to_str != NULL)
2566 return (*t->to_pid_to_str) (t, ptid);
2567 }
2568
2569 return normal_pid_to_str (ptid);
2570 }
2571
2572 char *
2573 target_thread_name (struct thread_info *info)
2574 {
2575 return current_target.to_thread_name (&current_target, info);
2576 }
2577
2578 void
2579 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2580 {
2581 struct target_ops *t;
2582
2583 target_dcache_invalidate ();
2584
2585 current_target.to_resume (&current_target, ptid, step, signal);
2586 if (targetdebug)
2587 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2588 ptid_get_pid (ptid),
2589 step ? "step" : "continue",
2590 gdb_signal_to_name (signal));
2591
2592 registers_changed_ptid (ptid);
2593 set_executing (ptid, 1);
2594 set_running (ptid, 1);
2595 clear_inline_frame_state (ptid);
2596 }
2597
2598 void
2599 target_pass_signals (int numsigs, unsigned char *pass_signals)
2600 {
2601 struct target_ops *t;
2602
2603 for (t = current_target.beneath; t != NULL; t = t->beneath)
2604 {
2605 if (t->to_pass_signals != NULL)
2606 {
2607 if (targetdebug)
2608 {
2609 int i;
2610
2611 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2612 numsigs);
2613
2614 for (i = 0; i < numsigs; i++)
2615 if (pass_signals[i])
2616 fprintf_unfiltered (gdb_stdlog, " %s",
2617 gdb_signal_to_name (i));
2618
2619 fprintf_unfiltered (gdb_stdlog, " })\n");
2620 }
2621
2622 (*t->to_pass_signals) (t, numsigs, pass_signals);
2623 return;
2624 }
2625 }
2626 }
2627
2628 void
2629 target_program_signals (int numsigs, unsigned char *program_signals)
2630 {
2631 struct target_ops *t;
2632
2633 for (t = current_target.beneath; t != NULL; t = t->beneath)
2634 {
2635 if (t->to_program_signals != NULL)
2636 {
2637 if (targetdebug)
2638 {
2639 int i;
2640
2641 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2642 numsigs);
2643
2644 for (i = 0; i < numsigs; i++)
2645 if (program_signals[i])
2646 fprintf_unfiltered (gdb_stdlog, " %s",
2647 gdb_signal_to_name (i));
2648
2649 fprintf_unfiltered (gdb_stdlog, " })\n");
2650 }
2651
2652 (*t->to_program_signals) (t, numsigs, program_signals);
2653 return;
2654 }
2655 }
2656 }
2657
2658 /* Look through the list of possible targets for a target that can
2659 follow forks. */
2660
2661 int
2662 target_follow_fork (int follow_child, int detach_fork)
2663 {
2664 struct target_ops *t;
2665
2666 for (t = current_target.beneath; t != NULL; t = t->beneath)
2667 {
2668 if (t->to_follow_fork != NULL)
2669 {
2670 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2671
2672 if (targetdebug)
2673 fprintf_unfiltered (gdb_stdlog,
2674 "target_follow_fork (%d, %d) = %d\n",
2675 follow_child, detach_fork, retval);
2676 return retval;
2677 }
2678 }
2679
2680 /* Some target returned a fork event, but did not know how to follow it. */
2681 internal_error (__FILE__, __LINE__,
2682 _("could not find a target to follow fork"));
2683 }
2684
2685 void
2686 target_mourn_inferior (void)
2687 {
2688 struct target_ops *t;
2689
2690 for (t = current_target.beneath; t != NULL; t = t->beneath)
2691 {
2692 if (t->to_mourn_inferior != NULL)
2693 {
2694 t->to_mourn_inferior (t);
2695 if (targetdebug)
2696 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2697
2698 /* We no longer need to keep handles on any of the object files.
2699 Make sure to release them to avoid unnecessarily locking any
2700 of them while we're not actually debugging. */
2701 bfd_cache_close_all ();
2702
2703 return;
2704 }
2705 }
2706
2707 internal_error (__FILE__, __LINE__,
2708 _("could not find a target to follow mourn inferior"));
2709 }
2710
2711 /* Look for a target which can describe architectural features, starting
2712 from TARGET. If we find one, return its description. */
2713
2714 const struct target_desc *
2715 target_read_description (struct target_ops *target)
2716 {
2717 struct target_ops *t;
2718
2719 for (t = target; t != NULL; t = t->beneath)
2720 if (t->to_read_description != NULL)
2721 {
2722 const struct target_desc *tdesc;
2723
2724 tdesc = t->to_read_description (t);
2725 if (tdesc)
2726 return tdesc;
2727 }
2728
2729 return NULL;
2730 }
2731
/* The default implementation of to_search_memory.
   This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search in on the
   target side with, for example, gdbserver).

   Returns 1 and sets *FOUND_ADDRP if PATTERN (PATTERN_LEN bytes) is
   found in [START_ADDR, START_ADDR + SEARCH_SPACE_LEN); returns 0 if
   not found, -1 on a memory read error.  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* The buffer holds an extra PATTERN_LEN - 1 bytes so a match that
     straddles a chunk boundary is still found.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* NOTE(review): plain malloc (not xmalloc) plus an explicit error —
     presumably deliberate so a huge request fails gracefully rather
     than aborting; confirm against GDB allocation conventions.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  /* Translate the buffer offset back into a target address.  */
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2839
2840 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2841 sequence of bytes in PATTERN with length PATTERN_LEN.
2842
2843 The result is 1 if found, 0 if not found, and -1 if there was an error
2844 requiring halting of the search (e.g. memory read error).
2845 If the pattern is found the address is recorded in FOUND_ADDRP. */
2846
2847 int
2848 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2849 const gdb_byte *pattern, ULONGEST pattern_len,
2850 CORE_ADDR *found_addrp)
2851 {
2852 struct target_ops *t;
2853 int found;
2854
2855 /* We don't use INHERIT to set current_target.to_search_memory,
2856 so we have to scan the target stack and handle targetdebug
2857 ourselves. */
2858
2859 if (targetdebug)
2860 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2861 hex_string (start_addr));
2862
2863 for (t = current_target.beneath; t != NULL; t = t->beneath)
2864 if (t->to_search_memory != NULL)
2865 break;
2866
2867 if (t != NULL)
2868 {
2869 found = t->to_search_memory (t, start_addr, search_space_len,
2870 pattern, pattern_len, found_addrp);
2871 }
2872 else
2873 {
2874 /* If a special version of to_search_memory isn't available, use the
2875 simple version. */
2876 found = simple_search_memory (current_target.beneath,
2877 start_addr, search_space_len,
2878 pattern, pattern_len, found_addrp);
2879 }
2880
2881 if (targetdebug)
2882 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2883
2884 return found;
2885 }
2886
2887 /* Look through the currently pushed targets. If none of them will
2888 be able to restart the currently running process, issue an error
2889 message. */
2890
2891 void
2892 target_require_runnable (void)
2893 {
2894 struct target_ops *t;
2895
2896 for (t = target_stack; t != NULL; t = t->beneath)
2897 {
2898 /* If this target knows how to create a new program, then
2899 assume we will still be able to after killing the current
2900 one. Either killing and mourning will not pop T, or else
2901 find_default_run_target will find it again. */
2902 if (t->to_create_inferior != NULL)
2903 return;
2904
2905 /* Do not worry about thread_stratum targets that can not
2906 create inferiors. Assume they will be pushed again if
2907 necessary, and continue to the process_stratum. */
2908 if (t->to_stratum == thread_stratum
2909 || t->to_stratum == arch_stratum)
2910 continue;
2911
2912 error (_("The \"%s\" target does not support \"run\". "
2913 "Try \"help target\" or \"continue\"."),
2914 t->to_shortname);
2915 }
2916
2917 /* This function is only called if the target is running. In that
2918 case there should have been a process_stratum target and it
2919 should either know how to create inferiors, or not... */
2920 internal_error (__FILE__, __LINE__, _("No targets found"));
2921 }
2922
2923 /* Look through the list of possible targets for a target that can
2924 execute a run or attach command without any other data. This is
2925 used to locate the default process stratum.
2926
2927 If DO_MESG is not NULL, the result is always valid (error() is
2928 called for errors); else, return NULL on error. */
2929
2930 static struct target_ops *
2931 find_default_run_target (char *do_mesg)
2932 {
2933 struct target_ops **t;
2934 struct target_ops *runable = NULL;
2935 int count;
2936
2937 count = 0;
2938
2939 for (t = target_structs; t < target_structs + target_struct_size;
2940 ++t)
2941 {
2942 if ((*t)->to_can_run && target_can_run (*t))
2943 {
2944 runable = *t;
2945 ++count;
2946 }
2947 }
2948
2949 if (count != 1)
2950 {
2951 if (do_mesg)
2952 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2953 else
2954 return NULL;
2955 }
2956
2957 return runable;
2958 }
2959
2960 void
2961 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2962 {
2963 struct target_ops *t;
2964
2965 t = find_default_run_target ("attach");
2966 (t->to_attach) (t, args, from_tty);
2967 return;
2968 }
2969
2970 void
2971 find_default_create_inferior (struct target_ops *ops,
2972 char *exec_file, char *allargs, char **env,
2973 int from_tty)
2974 {
2975 struct target_ops *t;
2976
2977 t = find_default_run_target ("run");
2978 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2979 return;
2980 }
2981
2982 static int
2983 find_default_can_async_p (struct target_ops *ignore)
2984 {
2985 struct target_ops *t;
2986
2987 /* This may be called before the target is pushed on the stack;
2988 look for the default process stratum. If there's none, gdb isn't
2989 configured with a native debugger, and target remote isn't
2990 connected yet. */
2991 t = find_default_run_target (NULL);
2992 if (t && t->to_can_async_p != delegate_can_async_p)
2993 return (t->to_can_async_p) (t);
2994 return 0;
2995 }
2996
2997 static int
2998 find_default_is_async_p (struct target_ops *ignore)
2999 {
3000 struct target_ops *t;
3001
3002 /* This may be called before the target is pushed on the stack;
3003 look for the default process stratum. If there's none, gdb isn't
3004 configured with a native debugger, and target remote isn't
3005 connected yet. */
3006 t = find_default_run_target (NULL);
3007 if (t && t->to_is_async_p != delegate_is_async_p)
3008 return (t->to_is_async_p) (t);
3009 return 0;
3010 }
3011
3012 static int
3013 find_default_supports_non_stop (struct target_ops *self)
3014 {
3015 struct target_ops *t;
3016
3017 t = find_default_run_target (NULL);
3018 if (t && t->to_supports_non_stop)
3019 return (t->to_supports_non_stop) (t);
3020 return 0;
3021 }
3022
3023 int
3024 target_supports_non_stop (void)
3025 {
3026 struct target_ops *t;
3027
3028 for (t = &current_target; t != NULL; t = t->beneath)
3029 if (t->to_supports_non_stop)
3030 return t->to_supports_non_stop (t);
3031
3032 return 0;
3033 }
3034
3035 /* Implement the "info proc" command. */
3036
3037 int
3038 target_info_proc (char *args, enum info_proc_what what)
3039 {
3040 struct target_ops *t;
3041
3042 /* If we're already connected to something that can get us OS
3043 related data, use it. Otherwise, try using the native
3044 target. */
3045 if (current_target.to_stratum >= process_stratum)
3046 t = current_target.beneath;
3047 else
3048 t = find_default_run_target (NULL);
3049
3050 for (; t != NULL; t = t->beneath)
3051 {
3052 if (t->to_info_proc != NULL)
3053 {
3054 t->to_info_proc (t, args, what);
3055
3056 if (targetdebug)
3057 fprintf_unfiltered (gdb_stdlog,
3058 "target_info_proc (\"%s\", %d)\n", args, what);
3059
3060 return 1;
3061 }
3062 }
3063
3064 return 0;
3065 }
3066
3067 static int
3068 find_default_supports_disable_randomization (struct target_ops *self)
3069 {
3070 struct target_ops *t;
3071
3072 t = find_default_run_target (NULL);
3073 if (t && t->to_supports_disable_randomization)
3074 return (t->to_supports_disable_randomization) (t);
3075 return 0;
3076 }
3077
3078 int
3079 target_supports_disable_randomization (void)
3080 {
3081 struct target_ops *t;
3082
3083 for (t = &current_target; t != NULL; t = t->beneath)
3084 if (t->to_supports_disable_randomization)
3085 return t->to_supports_disable_randomization (t);
3086
3087 return 0;
3088 }
3089
3090 char *
3091 target_get_osdata (const char *type)
3092 {
3093 struct target_ops *t;
3094
3095 /* If we're already connected to something that can get us OS
3096 related data, use it. Otherwise, try using the native
3097 target. */
3098 if (current_target.to_stratum >= process_stratum)
3099 t = current_target.beneath;
3100 else
3101 t = find_default_run_target ("get OS data");
3102
3103 if (!t)
3104 return NULL;
3105
3106 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3107 }
3108
3109 /* Determine the current address space of thread PTID. */
3110
3111 struct address_space *
3112 target_thread_address_space (ptid_t ptid)
3113 {
3114 struct address_space *aspace;
3115 struct inferior *inf;
3116 struct target_ops *t;
3117
3118 for (t = current_target.beneath; t != NULL; t = t->beneath)
3119 {
3120 if (t->to_thread_address_space != NULL)
3121 {
3122 aspace = t->to_thread_address_space (t, ptid);
3123 gdb_assert (aspace);
3124
3125 if (targetdebug)
3126 fprintf_unfiltered (gdb_stdlog,
3127 "target_thread_address_space (%s) = %d\n",
3128 target_pid_to_str (ptid),
3129 address_space_num (aspace));
3130 return aspace;
3131 }
3132 }
3133
3134 /* Fall-back to the "main" address space of the inferior. */
3135 inf = find_inferior_pid (ptid_get_pid (ptid));
3136
3137 if (inf == NULL || inf->aspace == NULL)
3138 internal_error (__FILE__, __LINE__,
3139 _("Can't determine the current "
3140 "address space of thread %s\n"),
3141 target_pid_to_str (ptid));
3142
3143 return inf->aspace;
3144 }
3145
3146
3147 /* Target file operations. */
3148
3149 static struct target_ops *
3150 default_fileio_target (void)
3151 {
3152 /* If we're already connected to something that can perform
3153 file I/O, use it. Otherwise, try using the native target. */
3154 if (current_target.to_stratum >= process_stratum)
3155 return current_target.beneath;
3156 else
3157 return find_default_run_target ("file I/O");
3158 }
3159
3160 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3161 target file descriptor, or -1 if an error occurs (and set
3162 *TARGET_ERRNO). */
3163 int
3164 target_fileio_open (const char *filename, int flags, int mode,
3165 int *target_errno)
3166 {
3167 struct target_ops *t;
3168
3169 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3170 {
3171 if (t->to_fileio_open != NULL)
3172 {
3173 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
3174
3175 if (targetdebug)
3176 fprintf_unfiltered (gdb_stdlog,
3177 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3178 filename, flags, mode,
3179 fd, fd != -1 ? 0 : *target_errno);
3180 return fd;
3181 }
3182 }
3183
3184 *target_errno = FILEIO_ENOSYS;
3185 return -1;
3186 }
3187
3188 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3189 Return the number of bytes written, or -1 if an error occurs
3190 (and set *TARGET_ERRNO). */
3191 int
3192 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3193 ULONGEST offset, int *target_errno)
3194 {
3195 struct target_ops *t;
3196
3197 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3198 {
3199 if (t->to_fileio_pwrite != NULL)
3200 {
3201 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
3202 target_errno);
3203
3204 if (targetdebug)
3205 fprintf_unfiltered (gdb_stdlog,
3206 "target_fileio_pwrite (%d,...,%d,%s) "
3207 "= %d (%d)\n",
3208 fd, len, pulongest (offset),
3209 ret, ret != -1 ? 0 : *target_errno);
3210 return ret;
3211 }
3212 }
3213
3214 *target_errno = FILEIO_ENOSYS;
3215 return -1;
3216 }
3217
3218 /* Read up to LEN bytes FD on the target into READ_BUF.
3219 Return the number of bytes read, or -1 if an error occurs
3220 (and set *TARGET_ERRNO). */
3221 int
3222 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3223 ULONGEST offset, int *target_errno)
3224 {
3225 struct target_ops *t;
3226
3227 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3228 {
3229 if (t->to_fileio_pread != NULL)
3230 {
3231 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
3232 target_errno);
3233
3234 if (targetdebug)
3235 fprintf_unfiltered (gdb_stdlog,
3236 "target_fileio_pread (%d,...,%d,%s) "
3237 "= %d (%d)\n",
3238 fd, len, pulongest (offset),
3239 ret, ret != -1 ? 0 : *target_errno);
3240 return ret;
3241 }
3242 }
3243
3244 *target_errno = FILEIO_ENOSYS;
3245 return -1;
3246 }
3247
3248 /* Close FD on the target. Return 0, or -1 if an error occurs
3249 (and set *TARGET_ERRNO). */
3250 int
3251 target_fileio_close (int fd, int *target_errno)
3252 {
3253 struct target_ops *t;
3254
3255 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3256 {
3257 if (t->to_fileio_close != NULL)
3258 {
3259 int ret = t->to_fileio_close (t, fd, target_errno);
3260
3261 if (targetdebug)
3262 fprintf_unfiltered (gdb_stdlog,
3263 "target_fileio_close (%d) = %d (%d)\n",
3264 fd, ret, ret != -1 ? 0 : *target_errno);
3265 return ret;
3266 }
3267 }
3268
3269 *target_errno = FILEIO_ENOSYS;
3270 return -1;
3271 }
3272
3273 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3274 occurs (and set *TARGET_ERRNO). */
3275 int
3276 target_fileio_unlink (const char *filename, int *target_errno)
3277 {
3278 struct target_ops *t;
3279
3280 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3281 {
3282 if (t->to_fileio_unlink != NULL)
3283 {
3284 int ret = t->to_fileio_unlink (t, filename, target_errno);
3285
3286 if (targetdebug)
3287 fprintf_unfiltered (gdb_stdlog,
3288 "target_fileio_unlink (%s) = %d (%d)\n",
3289 filename, ret, ret != -1 ? 0 : *target_errno);
3290 return ret;
3291 }
3292 }
3293
3294 *target_errno = FILEIO_ENOSYS;
3295 return -1;
3296 }
3297
3298 /* Read value of symbolic link FILENAME on the target. Return a
3299 null-terminated string allocated via xmalloc, or NULL if an error
3300 occurs (and set *TARGET_ERRNO). */
3301 char *
3302 target_fileio_readlink (const char *filename, int *target_errno)
3303 {
3304 struct target_ops *t;
3305
3306 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3307 {
3308 if (t->to_fileio_readlink != NULL)
3309 {
3310 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3311
3312 if (targetdebug)
3313 fprintf_unfiltered (gdb_stdlog,
3314 "target_fileio_readlink (%s) = %s (%d)\n",
3315 filename, ret? ret : "(nil)",
3316 ret? 0 : *target_errno);
3317 return ret;
3318 }
3319 }
3320
3321 *target_errno = FILEIO_ENOSYS;
3322 return NULL;
3323 }
3324
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE, ignoring any close error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int target_errno;

  target_fileio_close (*(int *) opaque, &target_errno);
}
3333
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.

   On error, -1 is returned and *BUF_P is left untouched.  On success
   with a non-empty file, ownership of the xmalloc'd buffer passes to
   the caller via *BUF_P; for an empty file the buffer is freed here
   and *BUF_P is NOT set.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
                            gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  /* MODE is irrelevant for a read-only open; 0700 is just a
     conventional placeholder here.  */
  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Make sure FD is closed on any exit path, including QUIT below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Always keep PADDING spare bytes at the end of the buffer, so
         callers (e.g. target_fileio_read_stralloc) can append a
         terminator.  */
      n = target_fileio_pread (fd, &buf[buf_pos],
                               buf_alloc - buf_pos - padding, buf_pos,
                               &target_errno);
      if (n < 0)
        {
          /* An error occurred.  */
          do_cleanups (close_cleanup);
          xfree (buf);
          return -1;
        }
      else if (n == 0)
        {
          /* Read all there was.  */
          do_cleanups (close_cleanup);
          if (buf_pos == 0)
            xfree (buf);
          else
            *buf_p = buf;
          return buf_pos;
        }

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
        {
          buf_alloc *= 2;
          buf = xrealloc (buf, buf_alloc);
        }

      /* Allow the user to interrupt a long transfer; the cleanup
         above closes FD if QUIT throws.  */
      QUIT;
    }
}
3397
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* No padding needed: the caller gets the raw bytes.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3407
/* Read target file FILENAME.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

char *
target_fileio_read_stralloc (const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Request one byte of padding so the terminating NUL below is
     guaranteed to fit without reallocating.  */
  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  /* For an empty file the helper already freed its buffer and did not
     set BUFFER, so return a fresh empty string instead.  */
  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
        warning (_("target file %s "
                   "contained unexpected null characters"),
                 filename);
        break;
      }

  /* Caller owns the returned string and must xfree it.  */
  return bufstr;
}
3444
3445
/* Default implementation of to_region_ok_for_hw_watchpoint: accept a
   region only if it is no wider than a pointer on the target, which
   is what a single hardware watch register can typically cover.  */
static int
default_region_ok_for_hw_watchpoint (struct target_ops *self,
                                     CORE_ADDR addr, int len)
{
  return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
}
3452
/* Default implementation of to_watchpoint_addr_within_range: true iff
   ADDR lies in the half-open interval [START, START + LENGTH).  */
static int
default_watchpoint_addr_within_range (struct target_ops *target,
                                      CORE_ADDR addr,
                                      CORE_ADDR start, int length)
{
  return addr >= start && addr < start + length;
}
3460
/* Default implementation of to_thread_architecture: every thread uses
   the inferior's main architecture.  */
static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}
3466
/* Helper that always returns 0; installed (via casts) into several
   dummy_target slots in init_dummy_target below.  */
static int
return_zero (void)
{
  return 0;
}
3472
/*
 * Find the next target down the stack from the specified target.
 */

struct target_ops *
find_target_beneath (struct target_ops *t)
{
  return t->beneath;
}
3482
3483 /* See target.h. */
3484
3485 struct target_ops *
3486 find_target_at (enum strata stratum)
3487 {
3488 struct target_ops *t;
3489
3490 for (t = current_target.beneath; t != NULL; t = t->beneath)
3491 if (t->to_stratum == stratum)
3492 return t;
3493
3494 return NULL;
3495 }
3496
3497 \f
/* The inferior process has died.  Long live the inferior!  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Save and clear inferior_ptid first; the teardown below must not
     see a live inferior_ptid.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  /* Cached register contents are no longer valid.  */
  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3532 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer (so the result is overwritten by the next call and
   must not be freed).  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3544
/* to_pid_to_str implementation for the dummy target: just use the
   generic "process N" formatting.  */
static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3550
/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (struct target_ops *self,
                           find_memory_region_ftype ignore1, void *ignore2)
{
  /* error () throws, so the return below is never reached; it only
     satisfies the function's signature.  */
  error (_("Command not implemented for this target."));
  return 0;
}
3559
/* Error-catcher for target_make_corefile_notes.  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
                           bfd *ignore1, int *ignore2)
{
  /* error () throws, so the return below is never reached; it only
     satisfies the function's signature.  */
  error (_("Command not implemented for this target."));
  return NULL;
}
3568
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  /* "run" with no target connected falls back to the default
     create-inferior machinery.  */
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  /* NOTE(review): these casts call return_zero through function
     pointer types whose signatures differ from return_zero's own,
     which is undefined behavior per the C standard even though it
     works on supported hosts -- worth replacing with properly-typed
     stubs.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining slot with a generated default method.  */
  install_dummy_methods (&dummy_target);
}
3594 \f
/* Debug wrapper: forward to_open to the real target, then log the
   call to gdb_stdlog.  */
static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3602
/* Close TARG.  TARG must already have been unpushed.  Prefers the
   to_xclose variant (which may deallocate TARG itself) over
   to_close.  */
void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  /* to_xclose takes ownership of TARG, so it wins over to_close.  */
  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3616
/* Attach to the process described by ARGS.  Delegates to the current
   target's to_attach method; logs when target debugging is on.  */
void
target_attach (char *args, int from_tty)
{
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
                        args, from_tty);
}
3625
3626 int
3627 target_thread_alive (ptid_t ptid)
3628 {
3629 struct target_ops *t;
3630
3631 for (t = current_target.beneath; t != NULL; t = t->beneath)
3632 {
3633 if (t->to_thread_alive != NULL)
3634 {
3635 int retval;
3636
3637 retval = t->to_thread_alive (t, ptid);
3638 if (targetdebug)
3639 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3640 ptid_get_pid (ptid), retval);
3641
3642 return retval;
3643 }
3644 }
3645
3646 return 0;
3647 }
3648
3649 void
3650 target_find_new_threads (void)
3651 {
3652 struct target_ops *t;
3653
3654 for (t = current_target.beneath; t != NULL; t = t->beneath)
3655 {
3656 if (t->to_find_new_threads != NULL)
3657 {
3658 t->to_find_new_threads (t);
3659 if (targetdebug)
3660 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3661
3662 return;
3663 }
3664 }
3665 }
3666
3667 void
3668 target_stop (ptid_t ptid)
3669 {
3670 if (!may_stop)
3671 {
3672 warning (_("May not interrupt or stop the target, ignoring attempt"));
3673 return;
3674 }
3675
3676 (*current_target.to_stop) (&current_target, ptid);
3677 }
3678
/* Debug wrapper: forward to_post_attach to the real target, then log
   the call to gdb_stdlog.  */
static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3686
3687 /* Concatenate ELEM to LIST, a comma separate list, and return the
3688 result. The LIST incoming argument is released. */
3689
3690 static char *
3691 str_comma_list_concat_elem (char *list, const char *elem)
3692 {
3693 if (list == NULL)
3694 return xstrdup (elem);
3695 else
3696 return reconcat (list, list, ", ", elem, (char *) NULL);
3697 }
3698
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
           int opt, char *opt_str)
{
  /* Nothing to do if the flag is not set.  */
  if ((*target_options & opt) == 0)
    return ret;

  *target_options &= ~opt;
  return str_comma_list_concat_elem (ret, opt_str);
}
3716
/* Render TARGET_OPTIONS (a mask of TARGET_* resume flags) as a
   comma-separated string for debug output.  The returned string is
   xmalloc'd; the caller must free it.  */
char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

/* Append #OPT to RET and clear the bit if OPT is set.  */
#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  /* Any bits left over were not recognized above.  */
  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3734
/* Log a register access for FUNC to gdb_stdlog: the register's name
   (or number), its raw bytes, and, when it fits in a LONGEST, its
   value in hex and decimal.  */
static void
debug_print_register (const char * func,
                      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when one exists.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
                        gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  /* Only raw (not pseudo or -1 "all registers") requests have bytes
     to dump.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
        {
          fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
        }
      /* Also decode the value when it fits in a LONGEST.  */
      if (size <= sizeof (LONGEST))
        {
          ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

          fprintf_unfiltered (gdb_stdlog, " %s %s",
                              core_addr_to_string_nz (val), plongest (val));
        }
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3771
3772 void
3773 target_fetch_registers (struct regcache *regcache, int regno)
3774 {
3775 struct target_ops *t;
3776
3777 for (t = current_target.beneath; t != NULL; t = t->beneath)
3778 {
3779 if (t->to_fetch_registers != NULL)
3780 {
3781 t->to_fetch_registers (t, regcache, regno);
3782 if (targetdebug)
3783 debug_print_register ("target_fetch_registers", regcache, regno);
3784 return;
3785 }
3786 }
3787 }
3788
/* Write register REGNO (or all registers if REGNO is -1) from
   REGCACHE back to the target.  Errors out if "set may-write-registers"
   is off.  */
void
target_store_registers (struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!may_write_registers)
    error (_("Writing to registers is not allowed (regno %d)"), regno);

  current_target.to_store_registers (&current_target, regcache, regno);
  if (targetdebug)
    {
      debug_print_register ("target_store_registers", regcache, regno);
    }
}
3803
3804 int
3805 target_core_of_thread (ptid_t ptid)
3806 {
3807 struct target_ops *t;
3808
3809 for (t = current_target.beneath; t != NULL; t = t->beneath)
3810 {
3811 if (t->to_core_of_thread != NULL)
3812 {
3813 int retval = t->to_core_of_thread (t, ptid);
3814
3815 if (targetdebug)
3816 fprintf_unfiltered (gdb_stdlog,
3817 "target_core_of_thread (%d) = %d\n",
3818 ptid_get_pid (ptid), retval);
3819 return retval;
3820 }
3821 }
3822
3823 return -1;
3824 }
3825
3826 int
3827 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3828 {
3829 struct target_ops *t;
3830
3831 for (t = current_target.beneath; t != NULL; t = t->beneath)
3832 {
3833 if (t->to_verify_memory != NULL)
3834 {
3835 int retval = t->to_verify_memory (t, data, memaddr, size);
3836
3837 if (targetdebug)
3838 fprintf_unfiltered (gdb_stdlog,
3839 "target_verify_memory (%s, %s) = %d\n",
3840 paddress (target_gdbarch (), memaddr),
3841 pulongest (size),
3842 retval);
3843 return retval;
3844 }
3845 }
3846
3847 tcomplain ();
3848 }
3849
3850 /* The documentation for this function is in its prototype declaration in
3851 target.h. */
3852
3853 int
3854 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3855 {
3856 struct target_ops *t;
3857
3858 for (t = current_target.beneath; t != NULL; t = t->beneath)
3859 if (t->to_insert_mask_watchpoint != NULL)
3860 {
3861 int ret;
3862
3863 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
3864
3865 if (targetdebug)
3866 fprintf_unfiltered (gdb_stdlog, "\
3867 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3868 core_addr_to_string (addr),
3869 core_addr_to_string (mask), rw, ret);
3870
3871 return ret;
3872 }
3873
3874 return 1;
3875 }
3876
3877 /* The documentation for this function is in its prototype declaration in
3878 target.h. */
3879
3880 int
3881 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3882 {
3883 struct target_ops *t;
3884
3885 for (t = current_target.beneath; t != NULL; t = t->beneath)
3886 if (t->to_remove_mask_watchpoint != NULL)
3887 {
3888 int ret;
3889
3890 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
3891
3892 if (targetdebug)
3893 fprintf_unfiltered (gdb_stdlog, "\
3894 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
3895 core_addr_to_string (addr),
3896 core_addr_to_string (mask), rw, ret);
3897
3898 return ret;
3899 }
3900
3901 return 1;
3902 }
3903
3904 /* The documentation for this function is in its prototype declaration
3905 in target.h. */
3906
3907 int
3908 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
3909 {
3910 struct target_ops *t;
3911
3912 for (t = current_target.beneath; t != NULL; t = t->beneath)
3913 if (t->to_masked_watch_num_registers != NULL)
3914 return t->to_masked_watch_num_registers (t, addr, mask);
3915
3916 return -1;
3917 }
3918
3919 /* The documentation for this function is in its prototype declaration
3920 in target.h. */
3921
3922 int
3923 target_ranged_break_num_registers (void)
3924 {
3925 struct target_ops *t;
3926
3927 for (t = current_target.beneath; t != NULL; t = t->beneath)
3928 if (t->to_ranged_break_num_registers != NULL)
3929 return t->to_ranged_break_num_registers (t);
3930
3931 return -1;
3932 }
3933
3934 /* See target.h. */
3935
3936 struct btrace_target_info *
3937 target_enable_btrace (ptid_t ptid)
3938 {
3939 struct target_ops *t;
3940
3941 for (t = current_target.beneath; t != NULL; t = t->beneath)
3942 if (t->to_enable_btrace != NULL)
3943 return t->to_enable_btrace (t, ptid);
3944
3945 tcomplain ();
3946 return NULL;
3947 }
3948
3949 /* See target.h. */
3950
3951 void
3952 target_disable_btrace (struct btrace_target_info *btinfo)
3953 {
3954 struct target_ops *t;
3955
3956 for (t = current_target.beneath; t != NULL; t = t->beneath)
3957 if (t->to_disable_btrace != NULL)
3958 {
3959 t->to_disable_btrace (t, btinfo);
3960 return;
3961 }
3962
3963 tcomplain ();
3964 }
3965
3966 /* See target.h. */
3967
3968 void
3969 target_teardown_btrace (struct btrace_target_info *btinfo)
3970 {
3971 struct target_ops *t;
3972
3973 for (t = current_target.beneath; t != NULL; t = t->beneath)
3974 if (t->to_teardown_btrace != NULL)
3975 {
3976 t->to_teardown_btrace (t, btinfo);
3977 return;
3978 }
3979
3980 tcomplain ();
3981 }
3982
3983 /* See target.h. */
3984
3985 enum btrace_error
3986 target_read_btrace (VEC (btrace_block_s) **btrace,
3987 struct btrace_target_info *btinfo,
3988 enum btrace_read_type type)
3989 {
3990 struct target_ops *t;
3991
3992 for (t = current_target.beneath; t != NULL; t = t->beneath)
3993 if (t->to_read_btrace != NULL)
3994 return t->to_read_btrace (t, btrace, btinfo, type);
3995
3996 tcomplain ();
3997 return BTRACE_ERR_NOT_SUPPORTED;
3998 }
3999
4000 /* See target.h. */
4001
4002 void
4003 target_stop_recording (void)
4004 {
4005 struct target_ops *t;
4006
4007 for (t = current_target.beneath; t != NULL; t = t->beneath)
4008 if (t->to_stop_recording != NULL)
4009 {
4010 t->to_stop_recording (t);
4011 return;
4012 }
4013
4014 /* This is optional. */
4015 }
4016
4017 /* See target.h. */
4018
4019 void
4020 target_info_record (void)
4021 {
4022 struct target_ops *t;
4023
4024 for (t = current_target.beneath; t != NULL; t = t->beneath)
4025 if (t->to_info_record != NULL)
4026 {
4027 t->to_info_record (t);
4028 return;
4029 }
4030
4031 tcomplain ();
4032 }
4033
4034 /* See target.h. */
4035
4036 void
4037 target_save_record (const char *filename)
4038 {
4039 struct target_ops *t;
4040
4041 for (t = current_target.beneath; t != NULL; t = t->beneath)
4042 if (t->to_save_record != NULL)
4043 {
4044 t->to_save_record (t, filename);
4045 return;
4046 }
4047
4048 tcomplain ();
4049 }
4050
4051 /* See target.h. */
4052
4053 int
4054 target_supports_delete_record (void)
4055 {
4056 struct target_ops *t;
4057
4058 for (t = current_target.beneath; t != NULL; t = t->beneath)
4059 if (t->to_delete_record != NULL)
4060 return 1;
4061
4062 return 0;
4063 }
4064
4065 /* See target.h. */
4066
4067 void
4068 target_delete_record (void)
4069 {
4070 struct target_ops *t;
4071
4072 for (t = current_target.beneath; t != NULL; t = t->beneath)
4073 if (t->to_delete_record != NULL)
4074 {
4075 t->to_delete_record (t);
4076 return;
4077 }
4078
4079 tcomplain ();
4080 }
4081
4082 /* See target.h. */
4083
4084 int
4085 target_record_is_replaying (void)
4086 {
4087 struct target_ops *t;
4088
4089 for (t = current_target.beneath; t != NULL; t = t->beneath)
4090 if (t->to_record_is_replaying != NULL)
4091 return t->to_record_is_replaying (t);
4092
4093 return 0;
4094 }
4095
4096 /* See target.h. */
4097
4098 void
4099 target_goto_record_begin (void)
4100 {
4101 struct target_ops *t;
4102
4103 for (t = current_target.beneath; t != NULL; t = t->beneath)
4104 if (t->to_goto_record_begin != NULL)
4105 {
4106 t->to_goto_record_begin (t);
4107 return;
4108 }
4109
4110 tcomplain ();
4111 }
4112
4113 /* See target.h. */
4114
4115 void
4116 target_goto_record_end (void)
4117 {
4118 struct target_ops *t;
4119
4120 for (t = current_target.beneath; t != NULL; t = t->beneath)
4121 if (t->to_goto_record_end != NULL)
4122 {
4123 t->to_goto_record_end (t);
4124 return;
4125 }
4126
4127 tcomplain ();
4128 }
4129
4130 /* See target.h. */
4131
4132 void
4133 target_goto_record (ULONGEST insn)
4134 {
4135 struct target_ops *t;
4136
4137 for (t = current_target.beneath; t != NULL; t = t->beneath)
4138 if (t->to_goto_record != NULL)
4139 {
4140 t->to_goto_record (t, insn);
4141 return;
4142 }
4143
4144 tcomplain ();
4145 }
4146
4147 /* See target.h. */
4148
4149 void
4150 target_insn_history (int size, int flags)
4151 {
4152 struct target_ops *t;
4153
4154 for (t = current_target.beneath; t != NULL; t = t->beneath)
4155 if (t->to_insn_history != NULL)
4156 {
4157 t->to_insn_history (t, size, flags);
4158 return;
4159 }
4160
4161 tcomplain ();
4162 }
4163
4164 /* See target.h. */
4165
4166 void
4167 target_insn_history_from (ULONGEST from, int size, int flags)
4168 {
4169 struct target_ops *t;
4170
4171 for (t = current_target.beneath; t != NULL; t = t->beneath)
4172 if (t->to_insn_history_from != NULL)
4173 {
4174 t->to_insn_history_from (t, from, size, flags);
4175 return;
4176 }
4177
4178 tcomplain ();
4179 }
4180
4181 /* See target.h. */
4182
4183 void
4184 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4185 {
4186 struct target_ops *t;
4187
4188 for (t = current_target.beneath; t != NULL; t = t->beneath)
4189 if (t->to_insn_history_range != NULL)
4190 {
4191 t->to_insn_history_range (t, begin, end, flags);
4192 return;
4193 }
4194
4195 tcomplain ();
4196 }
4197
4198 /* See target.h. */
4199
4200 void
4201 target_call_history (int size, int flags)
4202 {
4203 struct target_ops *t;
4204
4205 for (t = current_target.beneath; t != NULL; t = t->beneath)
4206 if (t->to_call_history != NULL)
4207 {
4208 t->to_call_history (t, size, flags);
4209 return;
4210 }
4211
4212 tcomplain ();
4213 }
4214
4215 /* See target.h. */
4216
4217 void
4218 target_call_history_from (ULONGEST begin, int size, int flags)
4219 {
4220 struct target_ops *t;
4221
4222 for (t = current_target.beneath; t != NULL; t = t->beneath)
4223 if (t->to_call_history_from != NULL)
4224 {
4225 t->to_call_history_from (t, begin, size, flags);
4226 return;
4227 }
4228
4229 tcomplain ();
4230 }
4231
4232 /* See target.h. */
4233
4234 void
4235 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4236 {
4237 struct target_ops *t;
4238
4239 for (t = current_target.beneath; t != NULL; t = t->beneath)
4240 if (t->to_call_history_range != NULL)
4241 {
4242 t->to_call_history_range (t, begin, end, flags);
4243 return;
4244 }
4245
4246 tcomplain ();
4247 }
4248
/* Debug wrapper: forward to_prepare_to_store to the real target, then
   log the call to gdb_stdlog.  */
static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
4256
4257 /* See target.h. */
4258
4259 const struct frame_unwind *
4260 target_get_unwinder (void)
4261 {
4262 struct target_ops *t;
4263
4264 for (t = current_target.beneath; t != NULL; t = t->beneath)
4265 if (t->to_get_unwinder != NULL)
4266 return t->to_get_unwinder;
4267
4268 return NULL;
4269 }
4270
4271 /* See target.h. */
4272
4273 const struct frame_unwind *
4274 target_get_tailcall_unwinder (void)
4275 {
4276 struct target_ops *t;
4277
4278 for (t = current_target.beneath; t != NULL; t = t->beneath)
4279 if (t->to_get_tailcall_unwinder != NULL)
4280 return t->to_get_tailcall_unwinder;
4281
4282 return NULL;
4283 }
4284
4285 /* See target.h. */
4286
4287 CORE_ADDR
4288 forward_target_decr_pc_after_break (struct target_ops *ops,
4289 struct gdbarch *gdbarch)
4290 {
4291 for (; ops != NULL; ops = ops->beneath)
4292 if (ops->to_decr_pc_after_break != NULL)
4293 return ops->to_decr_pc_after_break (ops, gdbarch);
4294
4295 return gdbarch_decr_pc_after_break (gdbarch);
4296 }
4297
/* See target.h.  Convenience wrapper that starts the search at the
   top of the current target stack.  */

CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
4305
/* Debug wrapper for the deprecated_xfer_memory method: forward the
   transfer, log the call, and on success dump the transferred bytes
   (abbreviated unless "set debug target" is 2 or more).  */
static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
                              int write, struct mem_attrib *attrib,
                              struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
                                                attrib, target);

  fprintf_unfiltered (gdb_stdlog,
                      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
                      paddress (target_gdbarch (), memaddr), len,
                      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
        {
          /* Break the dump into 16-byte lines, keyed off the low bits
             of the buffer address.  */
          if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
            {
              /* At low verbosity, truncate after the first line.  */
              if (targetdebug < 2 && i > 0)
                {
                  fprintf_unfiltered (gdb_stdlog, " ...");
                  break;
                }
              fprintf_unfiltered (gdb_stdlog, "\n");
            }

          fprintf_unfiltered (gdb_stdlog, "  %02x", myaddr[i] & 0xff);
        }
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4346
/* Debug wrapper: forward to_files_info to the real target, then log
   the call to gdb_stdlog.  */
static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4354
4355 static int
4356 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4357 struct bp_target_info *bp_tgt)
4358 {
4359 int retval;
4360
4361 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4362
4363 fprintf_unfiltered (gdb_stdlog,
4364 "target_insert_breakpoint (%s, xxx) = %ld\n",
4365 core_addr_to_string (bp_tgt->placed_address),
4366 (unsigned long) retval);
4367 return retval;
4368 }
4369
4370 static int
4371 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4372 struct bp_target_info *bp_tgt)
4373 {
4374 int retval;
4375
4376 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4377
4378 fprintf_unfiltered (gdb_stdlog,
4379 "target_remove_breakpoint (%s, xxx) = %ld\n",
4380 core_addr_to_string (bp_tgt->placed_address),
4381 (unsigned long) retval);
4382 return retval;
4383 }
4384
4385 static int
4386 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4387 int type, int cnt, int from_tty)
4388 {
4389 int retval;
4390
4391 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4392 type, cnt, from_tty);
4393
4394 fprintf_unfiltered (gdb_stdlog,
4395 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4396 (unsigned long) type,
4397 (unsigned long) cnt,
4398 (unsigned long) from_tty,
4399 (unsigned long) retval);
4400 return retval;
4401 }
4402
4403 static int
4404 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4405 CORE_ADDR addr, int len)
4406 {
4407 CORE_ADDR retval;
4408
4409 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4410 addr, len);
4411
4412 fprintf_unfiltered (gdb_stdlog,
4413 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4414 core_addr_to_string (addr), (unsigned long) len,
4415 core_addr_to_string (retval));
4416 return retval;
4417 }
4418
4419 static int
4420 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4421 CORE_ADDR addr, int len, int rw,
4422 struct expression *cond)
4423 {
4424 int retval;
4425
4426 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4427 addr, len,
4428 rw, cond);
4429
4430 fprintf_unfiltered (gdb_stdlog,
4431 "target_can_accel_watchpoint_condition "
4432 "(%s, %d, %d, %s) = %ld\n",
4433 core_addr_to_string (addr), len, rw,
4434 host_address_to_string (cond), (unsigned long) retval);
4435 return retval;
4436 }
4437
4438 static int
4439 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4440 {
4441 int retval;
4442
4443 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4444
4445 fprintf_unfiltered (gdb_stdlog,
4446 "target_stopped_by_watchpoint () = %ld\n",
4447 (unsigned long) retval);
4448 return retval;
4449 }
4450
4451 static int
4452 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4453 {
4454 int retval;
4455
4456 retval = debug_target.to_stopped_data_address (target, addr);
4457
4458 fprintf_unfiltered (gdb_stdlog,
4459 "target_stopped_data_address ([%s]) = %ld\n",
4460 core_addr_to_string (*addr),
4461 (unsigned long)retval);
4462 return retval;
4463 }
4464
4465 static int
4466 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4467 CORE_ADDR addr,
4468 CORE_ADDR start, int length)
4469 {
4470 int retval;
4471
4472 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4473 start, length);
4474
4475 fprintf_filtered (gdb_stdlog,
4476 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4477 core_addr_to_string (addr), core_addr_to_string (start),
4478 length, retval);
4479 return retval;
4480 }
4481
4482 static int
4483 debug_to_insert_hw_breakpoint (struct target_ops *self,
4484 struct gdbarch *gdbarch,
4485 struct bp_target_info *bp_tgt)
4486 {
4487 int retval;
4488
4489 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4490 gdbarch, bp_tgt);
4491
4492 fprintf_unfiltered (gdb_stdlog,
4493 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4494 core_addr_to_string (bp_tgt->placed_address),
4495 (unsigned long) retval);
4496 return retval;
4497 }
4498
4499 static int
4500 debug_to_remove_hw_breakpoint (struct target_ops *self,
4501 struct gdbarch *gdbarch,
4502 struct bp_target_info *bp_tgt)
4503 {
4504 int retval;
4505
4506 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4507 gdbarch, bp_tgt);
4508
4509 fprintf_unfiltered (gdb_stdlog,
4510 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4511 core_addr_to_string (bp_tgt->placed_address),
4512 (unsigned long) retval);
4513 return retval;
4514 }
4515
4516 static int
4517 debug_to_insert_watchpoint (struct target_ops *self,
4518 CORE_ADDR addr, int len, int type,
4519 struct expression *cond)
4520 {
4521 int retval;
4522
4523 retval = debug_target.to_insert_watchpoint (&debug_target,
4524 addr, len, type, cond);
4525
4526 fprintf_unfiltered (gdb_stdlog,
4527 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4528 core_addr_to_string (addr), len, type,
4529 host_address_to_string (cond), (unsigned long) retval);
4530 return retval;
4531 }
4532
4533 static int
4534 debug_to_remove_watchpoint (struct target_ops *self,
4535 CORE_ADDR addr, int len, int type,
4536 struct expression *cond)
4537 {
4538 int retval;
4539
4540 retval = debug_target.to_remove_watchpoint (&debug_target,
4541 addr, len, type, cond);
4542
4543 fprintf_unfiltered (gdb_stdlog,
4544 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4545 core_addr_to_string (addr), len, type,
4546 host_address_to_string (cond), (unsigned long) retval);
4547 return retval;
4548 }
4549
/* Debug wrapper: forward to_terminal_init to the target beneath and
   note the call in the debug log.  */

static void
debug_to_terminal_init (struct target_ops *self)
{
  debug_target.to_terminal_init (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
4557
/* Debug wrapper: forward to_terminal_inferior (give the inferior the
   terminal) to the target beneath and log the call.  */

static void
debug_to_terminal_inferior (struct target_ops *self)
{
  debug_target.to_terminal_inferior (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
4565
/* Debug wrapper: forward to_terminal_ours_for_output to the target
   beneath and log the call.  */

static void
debug_to_terminal_ours_for_output (struct target_ops *self)
{
  debug_target.to_terminal_ours_for_output (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
4573
/* Debug wrapper: forward to_terminal_ours (take back the terminal for
   GDB) to the target beneath and log the call.  */

static void
debug_to_terminal_ours (struct target_ops *self)
{
  debug_target.to_terminal_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
4581
/* Debug wrapper: forward to_terminal_save_ours to the target beneath
   and log the call.  */

static void
debug_to_terminal_save_ours (struct target_ops *self)
{
  debug_target.to_terminal_save_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
4589
/* Debug wrapper: forward to_terminal_info to the target beneath and
   log the arguments.  NOTE(review): ARG is printed with %s -- confirm
   callers never pass NULL here.  */

static void
debug_to_terminal_info (struct target_ops *self,
			const char *arg, int from_tty)
{
  debug_target.to_terminal_info (&debug_target, arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
4599
/* Debug wrapper: forward to_load to the target beneath and log the
   arguments.  */

static void
debug_to_load (struct target_ops *self, char *args, int from_tty)
{
  debug_target.to_load (&debug_target, args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4607
/* Debug wrapper: forward to_post_startup_inferior to the target
   beneath and log the pid of the new inferior.  */

static void
debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_post_startup_inferior (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      ptid_get_pid (ptid));
}
4616
4617 static int
4618 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4619 {
4620 int retval;
4621
4622 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4623
4624 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4625 pid, retval);
4626
4627 return retval;
4628 }
4629
4630 static int
4631 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4632 {
4633 int retval;
4634
4635 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4636
4637 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4638 pid, retval);
4639
4640 return retval;
4641 }
4642
4643 static int
4644 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4645 {
4646 int retval;
4647
4648 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4649
4650 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4651 pid, retval);
4652
4653 return retval;
4654 }
4655
4656 static int
4657 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4658 {
4659 int retval;
4660
4661 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4662
4663 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4664 pid, retval);
4665
4666 return retval;
4667 }
4668
4669 static int
4670 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4671 {
4672 int retval;
4673
4674 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4675
4676 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4677 pid, retval);
4678
4679 return retval;
4680 }
4681
4682 static int
4683 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4684 {
4685 int retval;
4686
4687 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4688
4689 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4690 pid, retval);
4691
4692 return retval;
4693 }
4694
/* Debug wrapper: forward to_has_exited to the target beneath and log
   the arguments and result.  */

static int
debug_to_has_exited (struct target_ops *self,
		     int pid, int wait_status, int *exit_status)
{
  int has_exited;

  has_exited = debug_target.to_has_exited (&debug_target,
					   pid, wait_status, exit_status);

  /* NOTE(review): *EXIT_STATUS is printed even when HAS_EXITED is 0;
     confirm the target always stores a value there in that case.  */
  fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
		      pid, wait_status, *exit_status, has_exited);

  return has_exited;
}
4709
4710 static int
4711 debug_to_can_run (struct target_ops *self)
4712 {
4713 int retval;
4714
4715 retval = debug_target.to_can_run (&debug_target);
4716
4717 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4718
4719 return retval;
4720 }
4721
/* Debug wrapper: forward to_thread_architecture and log the result.
   Note this passes OPS through rather than &debug_target, unlike most
   wrappers in this file.  */

static struct gdbarch *
debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  struct gdbarch *retval;

  retval = debug_target.to_thread_architecture (ops, ptid);

  /* NOTE(review): assumes RETVAL is non-NULL, since
     gdbarch_bfd_arch_info is applied to it unconditionally -- confirm
     targets never return NULL here.  */
  fprintf_unfiltered (gdb_stdlog,
		      "target_thread_architecture (%s) = %s [%s]\n",
		      target_pid_to_str (ptid),
		      host_address_to_string (retval),
		      gdbarch_bfd_arch_info (retval)->printable_name);
  return retval;
}
4736
/* Debug wrapper: forward to_stop to the target beneath and log which
   ptid was asked to stop.  */

static void
debug_to_stop (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_stop (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
4745
/* Debug wrapper: forward to_rcmd (the "monitor" command text) to the
   target beneath and log the command string.  */

static void
debug_to_rcmd (struct target_ops *self, char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (&debug_target, command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
4753
/* Debug wrapper: forward to_pid_to_exec_file to the target beneath
   and log the result.  NOTE(review): EXEC_FILE is printed with %s and
   may be NULL when the target cannot determine the file -- confirm
   the host printf handles that.  */

static char *
debug_to_pid_to_exec_file (struct target_ops *self, int pid)
{
  char *exec_file;

  exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
		      pid, exec_file);

  return exec_file;
}
4766
/* Install the debug_to_* logging wrappers: save a copy of the current
   target vector in debug_target, then point the methods of
   current_target at wrappers that call the saved copy and log each
   call to gdb_stdlog.  Called when "set debug target" is non-zero.  */

static void
setup_target_debug (void)
{
  /* Keep the real methods so the wrappers can delegate to them.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  /* Breakpoint and watchpoint methods.  */
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  /* Terminal handling.  */
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  /* Catchpoints.  */
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4814 \f
4815
/* Help text shared by the "info target" and "info files" commands
   registered in initialize_targets below.  */
static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4820
/* Default implementation of to_rcmd: targets with no remote monitor
   reject the "monitor" command.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4826
/* Implement the "monitor" command: pass CMD through to the target's
   rcmd method, with output going to gdb_stdtarg.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
4833
4834 /* Print the name of each layers of our target stack. */
4835
4836 static void
4837 maintenance_print_target_stack (char *cmd, int from_tty)
4838 {
4839 struct target_ops *t;
4840
4841 printf_filtered (_("The current target stack is:\n"));
4842
4843 for (t = target_stack; t != NULL; t = t->beneath)
4844 {
4845 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4846 }
4847 }
4848
/* Controls if async mode is permitted.  This is the value actually
   consulted; it only changes when no inferior is running.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated.  */
static int target_async_permitted_1 = 0;
4855
/* The "set target-async" callback: copy the staged value into
   target_async_permitted, refusing (and reverting the staged value)
   while any inferior is live.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the user's edit of the staged variable before erroring,
	 so "show target-async" stays consistent.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
4868
/* The "show target-async" callback: report the current setting.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4878
/* Temporary copies of permission settings.  The "set may-*" commands
   write these; they are copied into the real may_* flags only when no
   inferior is executing (see set_target_permissions below).  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4887
/* Make the user-set values match the real values again, i.e. discard
   any staged-but-not-applied permission changes.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4900
/* The one function handles (most of) the permission flags in the same
   way: refuse changes while the inferior is executing, otherwise copy
   the staged values into the real flags.  may_write_memory is
   deliberately not handled here -- it has its own setter
   (set_write_memory_permission) that works independently of observer
   mode.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Roll back the staged values so "show" output stays accurate,
	 then reject the change.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
4922
/* Set memory write permission independently of observer mode.  Unlike
   set_target_permissions, this applies immediately even while the
   inferior is executing.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
4933
4934
/* Module initialization: push the dummy target as the permanent
   bottom of the target stack, and register the target-related
   commands and set/show variables.  */

void
initialize_targets (void)
{
  /* The dummy target is always present at the bottom of the stack.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are aliases for the same info.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The "may-*" permission flags below stage their values in the
     *_1 temporaries; set_target_permissions applies them.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}