convert to_download_trace_state_variable
[binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
59
60 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
61 long lwp, long tid);
62
63 static void tcomplain (void) ATTRIBUTE_NORETURN;
64
65 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
66
67 static int return_zero (void);
68
69 static int return_minus_one (void);
70
71 static void *return_null (void);
72
73 void target_ignore (void);
74
75 static void target_command (char *, int);
76
77 static struct target_ops *find_default_run_target (char *);
78
79 static target_xfer_partial_ftype default_xfer_partial;
80
81 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
82 ptid_t ptid);
83
84 static int dummy_find_memory_regions (struct target_ops *self,
85 find_memory_region_ftype ignore1,
86 void *ignore2);
87
88 static char *dummy_make_corefile_notes (struct target_ops *self,
89 bfd *ignore1, int *ignore2);
90
91 static int find_default_can_async_p (struct target_ops *ignore);
92
93 static int find_default_is_async_p (struct target_ops *ignore);
94
95 static enum exec_direction_kind default_execution_direction
96 (struct target_ops *self);
97
98 #include "target-delegates.c"
99
100 static void init_dummy_target (void);
101
102 static struct target_ops debug_target;
103
104 static void debug_to_open (char *, int);
105
106 static void debug_to_prepare_to_store (struct target_ops *self,
107 struct regcache *);
108
109 static void debug_to_files_info (struct target_ops *);
110
111 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
112 struct bp_target_info *);
113
114 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
118 int, int, int);
119
120 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
121 struct gdbarch *,
122 struct bp_target_info *);
123
124 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
125 struct gdbarch *,
126 struct bp_target_info *);
127
128 static int debug_to_insert_watchpoint (struct target_ops *self,
129 CORE_ADDR, int, int,
130 struct expression *);
131
132 static int debug_to_remove_watchpoint (struct target_ops *self,
133 CORE_ADDR, int, int,
134 struct expression *);
135
136 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
137
138 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
139 CORE_ADDR, CORE_ADDR, int);
140
141 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
142 CORE_ADDR, int);
143
144 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
145 CORE_ADDR, int, int,
146 struct expression *);
147
148 static void debug_to_terminal_init (struct target_ops *self);
149
150 static void debug_to_terminal_inferior (struct target_ops *self);
151
152 static void debug_to_terminal_ours_for_output (struct target_ops *self);
153
154 static void debug_to_terminal_save_ours (struct target_ops *self);
155
156 static void debug_to_terminal_ours (struct target_ops *self);
157
158 static void debug_to_load (struct target_ops *self, char *, int);
159
160 static int debug_to_can_run (struct target_ops *self);
161
162 static void debug_to_stop (struct target_ops *self, ptid_t);
163
/* Pointer to array of target architecture structures; the size of the
   array; the current index into the array; the allocated size of the
   array.  */
struct target_ops **target_structs;
unsigned target_struct_size;
unsigned target_struct_allocsize;
/* Initial capacity of TARGET_STRUCTS; the array is grown by doubling
   in add_target_with_completer when it fills up.  */
#define DEFAULT_ALLOCSIZE 10

/* The initial current target, so that there is always a semi-valid
   current target.  */

static struct target_ops dummy_target;

/* Top of target stack.  */

static struct target_ops *target_stack;

/* The target structure we are currently using to talk to a process
   or file or whatever "inferior" we have.  This is the "squashed"
   view rebuilt by update_current_target whenever the stack changes.  */

struct target_ops current_target;

/* Command list for the "target" prefix command.  */

static struct cmd_list_element *targetlist = NULL;

/* Nonzero if we should trust readonly sections from the
   executable when reading memory.  */

static int trust_readonly = 0;

/* Nonzero if we should show true memory content including
   memory breakpoint inserted by gdb.  */

static int show_memory_breakpoints = 0;

/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertant disruption, such as in non-stop mode.  */

int may_write_registers = 1;

int may_write_memory = 1;

int may_insert_breakpoints = 1;

int may_insert_tracepoints = 1;

int may_insert_fast_tracepoints = 1;

int may_stop = 1;

/* Non-zero if we want to see trace of target level stuff.
   Controlled by "set debug target".  */

static unsigned int targetdebug = 0;
/* Implement "show debug target": report the current value of the
   "set debug target" setting to FILE.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
225
226 static void setup_target_debug (void);
227
228 /* The user just typed 'target' without the name of a target. */
229
static void
target_command (char *arg, int from_tty)
{
  /* A bare "target" is not a complete command; point the user at the
     per-protocol subcommands.  ARG and FROM_TTY are unused.  */
  fputs_filtered ("Argument required (target name). Try `help target'\n",
		  gdb_stdout);
}
236
237 /* Default target_has_* methods for process_stratum targets. */
238
239 int
240 default_child_has_all_memory (struct target_ops *ops)
241 {
242 /* If no inferior selected, then we can't read memory here. */
243 if (ptid_equal (inferior_ptid, null_ptid))
244 return 0;
245
246 return 1;
247 }
248
249 int
250 default_child_has_memory (struct target_ops *ops)
251 {
252 /* If no inferior selected, then we can't read memory here. */
253 if (ptid_equal (inferior_ptid, null_ptid))
254 return 0;
255
256 return 1;
257 }
258
259 int
260 default_child_has_stack (struct target_ops *ops)
261 {
262 /* If no inferior selected, there's no stack. */
263 if (ptid_equal (inferior_ptid, null_ptid))
264 return 0;
265
266 return 1;
267 }
268
269 int
270 default_child_has_registers (struct target_ops *ops)
271 {
272 /* Can't read registers from no inferior. */
273 if (ptid_equal (inferior_ptid, null_ptid))
274 return 0;
275
276 return 1;
277 }
278
279 int
280 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
281 {
282 /* If there's no thread selected, then we can't make it run through
283 hoops. */
284 if (ptid_equal (the_ptid, null_ptid))
285 return 0;
286
287 return 1;
288 }
289
290
291 int
292 target_has_all_memory_1 (void)
293 {
294 struct target_ops *t;
295
296 for (t = current_target.beneath; t != NULL; t = t->beneath)
297 if (t->to_has_all_memory (t))
298 return 1;
299
300 return 0;
301 }
302
303 int
304 target_has_memory_1 (void)
305 {
306 struct target_ops *t;
307
308 for (t = current_target.beneath; t != NULL; t = t->beneath)
309 if (t->to_has_memory (t))
310 return 1;
311
312 return 0;
313 }
314
315 int
316 target_has_stack_1 (void)
317 {
318 struct target_ops *t;
319
320 for (t = current_target.beneath; t != NULL; t = t->beneath)
321 if (t->to_has_stack (t))
322 return 1;
323
324 return 0;
325 }
326
327 int
328 target_has_registers_1 (void)
329 {
330 struct target_ops *t;
331
332 for (t = current_target.beneath; t != NULL; t = t->beneath)
333 if (t->to_has_registers (t))
334 return 1;
335
336 return 0;
337 }
338
339 int
340 target_has_execution_1 (ptid_t the_ptid)
341 {
342 struct target_ops *t;
343
344 for (t = current_target.beneath; t != NULL; t = t->beneath)
345 if (t->to_has_execution (t, the_ptid))
346 return 1;
347
348 return 0;
349 }
350
/* Like target_has_execution_1, but for the current inferior.  */

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
356
357 /* Complete initialization of T. This ensures that various fields in
358 T are set, if needed by the target implementation. */
359
void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* NOTE(review): these casts install return_zero (declared as
     "int (*) (void)") behind pointer types with parameters; calling
     through a mismatched function-pointer type is formally undefined
     behavior, though it works on the ABIs GDB targets.  Typed stub
     functions would be cleaner — confirm before changing.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill in the remaining NULL methods with delegating stubs.  */
  install_delegators (t);
}
384
385 /* Add possible target architecture T to the list and add a new
386 command 'target T->to_shortname'. Set COMPLETER as the command's
387 completer if not NULL. */
388
void
add_target_with_completer (struct target_ops *t,
			   completer_ftype *completer)
{
  struct cmd_list_element *c;

  complete_target_initialization (t);

  /* Grow the registry of known targets on demand, doubling its
     capacity when full.  NOTE(review): xmalloc/xrealloc are assumed
     to abort on allocation failure, as in libiberty — hence no NULL
     checks here.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* Create the "target" prefix command lazily, the first time any
     target is registered.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol. For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
	       &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
425
426 /* Add a possible target architecture to the list. */
427
void
add_target (struct target_ops *t)
{
  /* Register T without any command-line completer.  */
  add_target_with_completer (t, NULL);
}
433
434 /* See target.h. */
435
void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  /* ALT is intentionally not freed here; presumably deprecate_cmd
     keeps the pointer as the suggested replacement text — confirm
     against cli-decode before changing.  */
  alt = xstrprintf ("target %s", t->to_shortname);
  deprecate_cmd (c, alt);
}
448
449 /* Stub functions */
450
/* Deliberate no-op, installed as the default for target methods that
   may safely do nothing (see update_current_target).  */

void
target_ignore (void)
{
}
455
456 void
457 target_kill (void)
458 {
459 struct target_ops *t;
460
461 for (t = current_target.beneath; t != NULL; t = t->beneath)
462 if (t->to_kill != NULL)
463 {
464 if (targetdebug)
465 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
466
467 t->to_kill (t);
468 return;
469 }
470
471 noprocess ();
472 }
473
void
target_load (char *arg, int from_tty)
{
  /* Loading may rewrite target memory; drop any cached contents
     before delegating to the current target's to_load method.  */
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
480
481 void
482 target_create_inferior (char *exec_file, char *args,
483 char **env, int from_tty)
484 {
485 struct target_ops *t;
486
487 for (t = current_target.beneath; t != NULL; t = t->beneath)
488 {
489 if (t->to_create_inferior != NULL)
490 {
491 t->to_create_inferior (t, exec_file, args, env, from_tty);
492 if (targetdebug)
493 fprintf_unfiltered (gdb_stdlog,
494 "target_create_inferior (%s, %s, xxx, %d)\n",
495 exec_file, args, from_tty);
496 return;
497 }
498 }
499
500 internal_error (__FILE__, __LINE__,
501 _("could not find a target to create inferior"));
502 }
503
/* Hand terminal ownership to the inferior, unless GDB is resuming it
   in the background, in which case GDB keeps the terminal.  */

void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) (&current_target);
}
518
/* Default memory transfer that always fails; installed as the default
   for deprecated_xfer_memory in update_current_target.  All parameters
   are ignored.  */

static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
526
/* Complain that the current target does not support the requested
   operation.  Throws an error, so this does not return
   (see the ATTRIBUTE_NORETURN forward declaration above).  */

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
533
/* Throw an error complaining that there is no process to debug.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
539
/* Default to_terminal_info method: there is nothing to report.
   ARGS and FROM_TTY are ignored.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
545
546 /* A default implementation for the to_get_ada_task_ptid target method.
547
548 This function builds the PTID by using both LWP and TID as part of
549 the PTID lwp and tid elements. The pid used is the pid of the
550 inferior_ptid. */
551
static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  /* Combine the current inferior's pid with the given LWP and TID
     (see the comment above for the full contract).  */
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
557
558 static enum exec_direction_kind
559 default_execution_direction (struct target_ops *self)
560 {
561 if (!target_can_execute_reverse)
562 return EXEC_FORWARD;
563 else if (!target_can_async_p ())
564 return EXEC_FORWARD;
565 else
566 gdb_assert_not_reached ("\
567 to_execution_direction must be implemented for reverse async");
568 }
569
570 /* Go through the target stack from top to bottom, copying over zero
571 entries in current_target, then filling in still empty entries. In
572 effect, we are doing class inheritance through the pushed target
573 vectors.
574
575 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
576 is currently implemented, is that it discards any knowledge of
577 which target an inherited method originally belonged to.
578 Consequently, new new target methods should instead explicitly and
579 locally search the target stack for the target that can handle the
580 request. */
581
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy FIELD from TARGET into current_target, but only if no target
     higher on the stack has already supplied it.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      /* Do not inherit to_files_info.  */
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      /* Do not inherit to_can_use_hw_breakpoint.  */
      /* Do not inherit to_insert_hw_breakpoint.  */
      /* Do not inherit to_remove_hw_breakpoint.  */
      /* Do not inherit to_ranged_break_num_registers.  */
      /* Do not inherit to_insert_watchpoint.  */
      /* Do not inherit to_remove_watchpoint.  */
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      /* Do not inherit to_watchpoint_addr_within_range.  */
      /* Do not inherit to_region_ok_for_hw_watchpoint.  */
      /* Do not inherit to_can_accel_watchpoint_condition.  */
      /* Do not inherit to_masked_watch_num_registers.  */
      /* Do not inherit to_terminal_init.  */
      /* Do not inherit to_terminal_inferior.  */
      /* Do not inherit to_terminal_ours_for_output.  */
      /* Do not inherit to_terminal_ours.  */
      /* Do not inherit to_terminal_save_ours.  */
      /* Do not inherit to_terminal_info.  */
      /* Do not inherit to_kill.  */
      /* Do not inherit to_load.  */
      /* Do no inherit to_create_inferior.  */
      /* Do not inherit to_post_startup_inferior.  */
      /* Do not inherit to_insert_fork_catchpoint.  */
      /* Do not inherit to_remove_fork_catchpoint.  */
      /* Do not inherit to_insert_vfork_catchpoint.  */
      /* Do not inherit to_remove_vfork_catchpoint.  */
      /* Do not inherit to_follow_fork.  */
      /* Do not inherit to_insert_exec_catchpoint.  */
      /* Do not inherit to_remove_exec_catchpoint.  */
      /* Do not inherit to_set_syscall_catchpoint.  */
      /* Do not inherit to_has_exited.  */
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      /* Do not inherit to_extra_thread_info.  */
      /* Do not inherit to_thread_name.  */
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      /* Do not inherit to_pid_to_exec_file.  */
      /* Do not inherit to_log_command.  */
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      /* Do not inherit to_find_memory_regions.  */
      /* Do not inherit to_make_corefile_notes.  */
      /* Do not inherit to_get_bookmark.  */
      /* Do not inherit to_goto_bookmark.  */
      /* Do not inherit to_get_thread_local_address.  */
      /* Do not inherit to_can_execute_reverse.  */
      /* Do not inherit to_execution_direction.  */
      /* Do not inherit to_thread_architecture.  */
      /* Do not inherit to_read_description.  */
      /* Do not inherit to_get_ada_task_ptid.  */
      /* Do not inherit to_search_memory.  */
      /* Do not inherit to_supports_multi_process.  */
      /* Do not inherit to_supports_enable_disable_tracepoint.  */
      /* Do not inherit to_supports_string_tracing.  */
      /* Do not inherit to_trace_init.  */
      /* Do not inherit to_download_tracepoint.  */
      /* Do not inherit to_can_download_tracepoint.  */
      /* Do not inherit to_download_trace_state_variable.  */
      INHERIT (to_enable_tracepoint, t);
      INHERIT (to_disable_tracepoint, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_get_tracepoint_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_buffer_size, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default.

     NOTE(review): as with complete_target_initialization, the casts
     below call stubs through mismatched function-pointer types, which
     is formally undefined behavior although it works on the ABIs GDB
     supports.  */

#define de_fault(field, value) \
  if (!current_target.field)	       \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_stop,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  /* A NULL to_read_description means "no target description", which
     is handled by callers; do not default it to a stub.  */
  current_target.to_read_description = NULL;
  de_fault (to_enable_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_disable_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct target_ops *, struct trace_status *))
	    return_minus_one);
  de_fault (to_get_tracepoint_status,
	    (void (*) (struct target_ops *, struct breakpoint *,
		       struct uploaded_tp *))
	    tcomplain);
  de_fault (to_trace_stop,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (struct target_ops *,
		      enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (struct target_ops *, int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (struct target_ops *, const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct target_ops *, struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct target_ops *, struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (struct target_ops *, gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (struct target_ops *))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_trace_buffer_size,
	    (void (*) (struct target_ops *, LONGEST))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (struct target_ops *,
		      const char *, const char *, const char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (struct target_ops *, ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (struct target_ops *,
		      CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (struct target_ops *,
						    const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (struct target_ops *))
	    return_null);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (struct target_ops *, int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (struct target_ops *))
	    return_zero);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
855
856 /* Push a new target type into the stack of the existing target accessors,
857 possibly superseding some of the existing accessors.
858
859 Rather than allow an empty stack, we always have the dummy target at
860 the bottom stratum, so we can call the function vectors without
861 checking them. */
862
void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in: the stack is
     kept ordered by descending to_stratum from the top.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
         and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Re-derive the squashed current_target from the updated stack.  */
  update_current_target ();
}
906
907 /* Remove a target_ops vector from the stack, wherever it may be.
908 Return how many times it was removed (0 or 1). */
909
int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target is the permanent bottom of the stack and must
     never be removed.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
948
/* Unpush targets from the top of the stack until only strata at or
   below ABOVE_STRATUM remain.  */

void
pop_all_targets_above (enum strata above_stratum)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      /* unpush_target returning 0 here means the top of the stack was
         not found on the stack — an internal inconsistency.  */
      if (!unpush_target (target_stack))
	{
	  fprintf_unfiltered (gdb_stderr,
			      "pop_all_targets couldn't find target %s\n",
			      target_stack->to_shortname);
	  internal_error (__FILE__, __LINE__,
			  _("failed internal consistency check"));
	  break;
	}
    }
}
965
/* Unpush every target above the permanent dummy target.  */

void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
971
972 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
973
974 int
975 target_is_pushed (struct target_ops *t)
976 {
977 struct target_ops **cur;
978
979 /* Check magic number. If wrong, it probably means someone changed
980 the struct definition, but not all the places that initialize one. */
981 if (t->to_magic != OPS_MAGIC)
982 {
983 fprintf_unfiltered (gdb_stderr,
984 "Magic number of %s target struct wrong\n",
985 t->to_shortname);
986 internal_error (__FILE__, __LINE__,
987 _("failed internal consistency check"));
988 }
989
990 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
991 if (*cur == t)
992 return 1;
993
994 return 0;
995 }
996
997 /* Using the objfile specified in OBJFILE, find the address for the
998 current thread's thread-local storage with offset OFFSET. */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target on the stack that can translate TLS
     addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  /* Translate the known TLS error codes into user-facing
	     messages, tailored to whether OBJFILE is a shared library
	     or the main executable.  */
	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Not a TLS-specific error; re-throw for a higher
	         catcher.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1093
1094 const char *
1095 target_xfer_status_to_string (enum target_xfer_status err)
1096 {
1097 #define CASE(X) case X: return #X
1098 switch (err)
1099 {
1100 CASE(TARGET_XFER_E_IO);
1101 CASE(TARGET_XFER_E_UNAVAILABLE);
1102 default:
1103 return "<unknown>";
1104 }
1105 #undef CASE
1106 };
1107
1108
#undef MIN
/* Local minimum-of-two macro.  NOTE: evaluates each argument twice;
   do not pass expressions with side effects.  */
#define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1111
/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.

   The returned byte count includes the terminating NUL when one was
   found.  On error, *STRING still receives whatever was read so far
   (possibly unterminated); the caller must free it in all cases.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read a 4-byte aligned chunk; TLEN is how many of its bytes
	 fall inside the requested range, OFFSET where they start.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the result buffer geometrically when it would overflow.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy this chunk's bytes, stopping at (and counting) the first
	 NUL terminator.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
done:
  /* Hand the buffer to the caller even on failure; *ERRNOP says what
     happened.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1184
1185 struct target_section_table *
1186 target_get_section_table (struct target_ops *target)
1187 {
1188 struct target_ops *t;
1189
1190 if (targetdebug)
1191 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1192
1193 for (t = target; t != NULL; t = t->beneath)
1194 if (t->to_get_section_table != NULL)
1195 return (*t->to_get_section_table) (t);
1196
1197 return NULL;
1198 }
1199
1200 /* Find a section containing ADDR. */
1201
1202 struct target_section *
1203 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1204 {
1205 struct target_section_table *table = target_get_section_table (target);
1206 struct target_section *secp;
1207
1208 if (table == NULL)
1209 return NULL;
1210
1211 for (secp = table->sections; secp < table->sections_end; secp++)
1212 {
1213 if (addr >= secp->addr && addr < secp->endaddr)
1214 return secp;
1215 }
1216 return NULL;
1217 }
1218
1219 /* Read memory from the live target, even if currently inspecting a
1220 traceframe. The return is the same as that of target_read. */
1221
1222 static enum target_xfer_status
1223 target_read_live_memory (enum target_object object,
1224 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1225 ULONGEST *xfered_len)
1226 {
1227 enum target_xfer_status ret;
1228 struct cleanup *cleanup;
1229
1230 /* Switch momentarily out of tfind mode so to access live memory.
1231 Note that this must not clear global state, such as the frame
1232 cache, which must still remain valid for the previous traceframe.
1233 We may be _building_ the frame cache at this point. */
1234 cleanup = make_cleanup_restore_traceframe_number ();
1235 set_traceframe_number (-1);
1236
1237 ret = target_xfer_partial (current_target.beneath, object, NULL,
1238 myaddr, NULL, memaddr, len, xfered_len);
1239
1240 do_cleanups (cleanup);
1241 return ret;
1242 }
1243
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.

   Returns TARGET_XFER_EOF if MEMADDR is not inside a read-only
   section, leaving the caller to handle the request some other way.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only read-only sections are eligible for this fallback.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      /* Find the section containing the start of the transfer and
	 clip the request to it.  */
      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  return TARGET_XFER_EOF;
}
1299
1300 /* Read memory from more than one valid target. A core file, for
1301 instance, could have some of memory but delegate other bits to
1302 the target below it. So, we must manually try all targets. */
1303
1304 static enum target_xfer_status
1305 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1306 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1307 ULONGEST *xfered_len)
1308 {
1309 enum target_xfer_status res;
1310
1311 do
1312 {
1313 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1314 readbuf, writebuf, memaddr, len,
1315 xfered_len);
1316 if (res == TARGET_XFER_OK)
1317 break;
1318
1319 /* Stop if the target reports that the memory is not available. */
1320 if (res == TARGET_XFER_E_UNAVAILABLE)
1321 break;
1322
1323 /* We want to continue past core files to executables, but not
1324 past a running target's memory. */
1325 if (ops->to_has_all_memory (ops))
1326 break;
1327
1328 ops = ops->beneath;
1329 }
1330 while (ops != NULL);
1331
1332 return res;
1333 }
1334
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   Tries, in order: unmapped overlay sections, trusted read-only
   executable sections, traceframe-available memory (with a live
   read-only fallback), the dcache, and finally a raw transfer through
   the target stack.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  /* LEN clipped to the memory region containing MEMADDR.
     NOTE(review): stored in an int -- assumes individual transfers fit
     in int; confirm against callers.  */
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  /* The traceframe does not cover MEMADDR itself.  */
	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Honor the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1545
1546 /* Perform a partial memory transfer. For docs see target.h,
1547 to_xfer_partial. */
1548
1549 static enum target_xfer_status
1550 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1551 gdb_byte *readbuf, const gdb_byte *writebuf,
1552 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1553 {
1554 enum target_xfer_status res;
1555
1556 /* Zero length requests are ok and require no work. */
1557 if (len == 0)
1558 return TARGET_XFER_EOF;
1559
1560 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1561 breakpoint insns, thus hiding out from higher layers whether
1562 there are software breakpoints inserted in the code stream. */
1563 if (readbuf != NULL)
1564 {
1565 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1566 xfered_len);
1567
1568 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1569 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1570 }
1571 else
1572 {
1573 void *buf;
1574 struct cleanup *old_chain;
1575
1576 /* A large write request is likely to be partially satisfied
1577 by memory_xfer_partial_1. We will continually malloc
1578 and free a copy of the entire write request for breakpoint
1579 shadow handling even though we only end up writing a small
1580 subset of it. Cap writes to 4KB to mitigate this. */
1581 len = min (4096, len);
1582
1583 buf = xmalloc (len);
1584 old_chain = make_cleanup (xfree, buf);
1585 memcpy (buf, writebuf, len);
1586
1587 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1588 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1589 xfered_len);
1590
1591 do_cleanups (old_chain);
1592 }
1593
1594 return res;
1595 }
1596
1597 static void
1598 restore_show_memory_breakpoints (void *arg)
1599 {
1600 show_memory_breakpoints = (uintptr_t) arg;
1601 }
1602
1603 struct cleanup *
1604 make_show_memory_breakpoints_cleanup (int show)
1605 {
1606 int current = show_memory_breakpoints;
1607
1608 show_memory_breakpoints = show;
1609 return make_cleanup (restore_show_memory_breakpoints,
1610 (void *) (uintptr_t) current);
1611 }
1612
/* For docs see target.h, to_xfer_partial.

   Central dispatcher: routes memory objects through the memory
   machinery, raw memory straight down the stack, and everything else
   to the target's own to_xfer_partial.  Also emits "set debug target"
   tracing and sanity-checks *XFERED_LEN.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the global "may-write-memory" setting before touching the
     target.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      /* Start a fresh output line at each 16-byte-aligned
		 address; with low debug verbosity, elide all but the
		 first line.  */
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1705
1706 /* Read LEN bytes of target memory at address MEMADDR, placing the
1707 results in GDB's memory at MYADDR. Returns either 0 for success or
1708 TARGET_XFER_E_IO if any error occurs.
1709
1710 If an error occurs, no guarantee is made about the contents of the data at
1711 MYADDR. In particular, the caller should not depend upon partial reads
1712 filling the buffer with good data. There is no way for the caller to know
1713 how much good data might have been transfered anyway. Callers that can
1714 deal with partial reads should call target_read (which will retry until
1715 it makes no progress, and then return how much was transferred). */
1716
1717 int
1718 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1719 {
1720 /* Dispatch to the topmost target, not the flattened current_target.
1721 Memory accesses check target->to_has_(all_)memory, and the
1722 flattened target doesn't inherit those. */
1723 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1724 myaddr, memaddr, len) == len)
1725 return 0;
1726 else
1727 return TARGET_XFER_E_IO;
1728 }
1729
1730 /* Like target_read_memory, but specify explicitly that this is a read
1731 from the target's raw memory. That is, this read bypasses the
1732 dcache, breakpoint shadowing, etc. */
1733
1734 int
1735 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1736 {
1737 /* See comment in target_read_memory about why the request starts at
1738 current_target.beneath. */
1739 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1740 myaddr, memaddr, len) == len)
1741 return 0;
1742 else
1743 return TARGET_XFER_E_IO;
1744 }
1745
1746 /* Like target_read_memory, but specify explicitly that this is a read from
1747 the target's stack. This may trigger different cache behavior. */
1748
1749 int
1750 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1751 {
1752 /* See comment in target_read_memory about why the request starts at
1753 current_target.beneath. */
1754 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1755 myaddr, memaddr, len) == len)
1756 return 0;
1757 else
1758 return TARGET_XFER_E_IO;
1759 }
1760
1761 /* Like target_read_memory, but specify explicitly that this is a read from
1762 the target's code. This may trigger different cache behavior. */
1763
1764 int
1765 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1766 {
1767 /* See comment in target_read_memory about why the request starts at
1768 current_target.beneath. */
1769 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1770 myaddr, memaddr, len) == len)
1771 return 0;
1772 else
1773 return TARGET_XFER_E_IO;
1774 }
1775
1776 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1777 Returns either 0 for success or TARGET_XFER_E_IO if any
1778 error occurs. If an error occurs, no guarantee is made about how
1779 much data got written. Callers that can deal with partial writes
1780 should call target_write. */
1781
1782 int
1783 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1784 {
1785 /* See comment in target_read_memory about why the request starts at
1786 current_target.beneath. */
1787 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1788 myaddr, memaddr, len) == len)
1789 return 0;
1790 else
1791 return TARGET_XFER_E_IO;
1792 }
1793
1794 /* Write LEN bytes from MYADDR to target raw memory at address
1795 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1796 if any error occurs. If an error occurs, no guarantee is made
1797 about how much data got written. Callers that can deal with
1798 partial writes should call target_write. */
1799
1800 int
1801 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1802 {
1803 /* See comment in target_read_memory about why the request starts at
1804 current_target.beneath. */
1805 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1806 myaddr, memaddr, len) == len)
1807 return 0;
1808 else
1809 return TARGET_XFER_E_IO;
1810 }
1811
/* Fetch the target's memory map.

   Returns a freshly allocated vector of regions sorted by address and
   numbered for the "mem" commands, or NULL if no target supplies a
   map, the map is empty, or the map contains overlapping regions.
   The caller owns (and must free) the vector.  */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");

  /* Find the first target in the stack that can supply a memory
     map.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_memory_map != NULL)
      break;

  if (t == NULL)
    return NULL;

  result = t->to_memory_map (t);
  if (result == NULL)
    return NULL;

  /* Sort by low address so overlap detection below reduces to an
     adjacent-pair comparison.  */
  qsort (VEC_address (mem_region_s, result),
	 VEC_length (mem_region_s, result),
	 sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
	{
	  warning (_("Overlapping regions in memory map: ignoring"));
	  VEC_free (mem_region_s, result);
	  return NULL;
	}
      last_one = this_one;
    }

  return result;
}
1859
1860 void
1861 target_flash_erase (ULONGEST address, LONGEST length)
1862 {
1863 struct target_ops *t;
1864
1865 for (t = current_target.beneath; t != NULL; t = t->beneath)
1866 if (t->to_flash_erase != NULL)
1867 {
1868 if (targetdebug)
1869 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1870 hex_string (address), phex (length, 0));
1871 t->to_flash_erase (t, address, length);
1872 return;
1873 }
1874
1875 tcomplain ();
1876 }
1877
1878 void
1879 target_flash_done (void)
1880 {
1881 struct target_ops *t;
1882
1883 for (t = current_target.beneath; t != NULL; t = t->beneath)
1884 if (t->to_flash_done != NULL)
1885 {
1886 if (targetdebug)
1887 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1888 t->to_flash_done (t);
1889 return;
1890 }
1891
1892 tcomplain ();
1893 }
1894
/* "show" callback for the "trust-readonly-sections" setting: report
   the current VALUE to FILE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1903
1904 /* More generic transfers. */
1905
1906 static enum target_xfer_status
1907 default_xfer_partial (struct target_ops *ops, enum target_object object,
1908 const char *annex, gdb_byte *readbuf,
1909 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1910 ULONGEST *xfered_len)
1911 {
1912 if (object == TARGET_OBJECT_MEMORY
1913 && ops->deprecated_xfer_memory != NULL)
1914 /* If available, fall back to the target's
1915 "deprecated_xfer_memory" method. */
1916 {
1917 int xfered = -1;
1918
1919 errno = 0;
1920 if (writebuf != NULL)
1921 {
1922 void *buffer = xmalloc (len);
1923 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1924
1925 memcpy (buffer, writebuf, len);
1926 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1927 1/*write*/, NULL, ops);
1928 do_cleanups (cleanup);
1929 }
1930 if (readbuf != NULL)
1931 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1932 0/*read*/, NULL, ops);
1933 if (xfered > 0)
1934 {
1935 *xfered_len = (ULONGEST) xfered;
1936 return TARGET_XFER_E_IO;
1937 }
1938 else if (xfered == 0 && errno == 0)
1939 /* "deprecated_xfer_memory" uses 0, cross checked against
1940 ERRNO as one indication of an error. */
1941 return TARGET_XFER_EOF;
1942 else
1943 return TARGET_XFER_E_IO;
1944 }
1945 else
1946 {
1947 gdb_assert (ops->beneath != NULL);
1948 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1949 readbuf, writebuf, offset, len,
1950 xfered_len);
1951 }
1952 }
1953
/* Target vector read/write partial wrapper functions.  */

/* Read wrapper: forward to target_xfer_partial with no write
   buffer.  */

static enum target_xfer_status
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
			      xfered_len);
}
1966
/* Write wrapper: forward to target_xfer_partial with no read
   buffer.  */

static enum target_xfer_status
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
			      xfered_len);
}
1976
1977 /* Wrappers to perform the full transfer. */
1978
1979 /* For docs on target_read see target.h. */
1980
1981 LONGEST
1982 target_read (struct target_ops *ops,
1983 enum target_object object,
1984 const char *annex, gdb_byte *buf,
1985 ULONGEST offset, LONGEST len)
1986 {
1987 LONGEST xfered = 0;
1988
1989 while (xfered < len)
1990 {
1991 ULONGEST xfered_len;
1992 enum target_xfer_status status;
1993
1994 status = target_read_partial (ops, object, annex,
1995 (gdb_byte *) buf + xfered,
1996 offset + xfered, len - xfered,
1997 &xfered_len);
1998
1999 /* Call an observer, notifying them of the xfer progress? */
2000 if (status == TARGET_XFER_EOF)
2001 return xfered;
2002 else if (status == TARGET_XFER_OK)
2003 {
2004 xfered += xfered_len;
2005 QUIT;
2006 }
2007 else
2008 return -1;
2009
2010 }
2011 return len;
2012 }
2013
2014 /* Assuming that the entire [begin, end) range of memory cannot be
2015 read, try to read whatever subrange is possible to read.
2016
2017 The function returns, in RESULT, either zero or one memory block.
2018 If there's a readable subrange at the beginning, it is completely
2019 read and returned. Any further readable subrange will not be read.
2020 Otherwise, if there's a readable subrange at the end, it will be
2021 completely read and returned. Any readable subranges before it
2022 (obviously, not starting at the beginning), will be ignored. In
2023 other cases -- either no readable subrange, or readable subrange(s)
2024 that is neither at the beginning, or end, nothing is returned.
2025
2026 The purpose of this function is to handle a read across a boundary
2027 of accessible memory in a case when memory map is not available.
2028 The above restrictions are fine for this case, but will give
2029 incorrect results if the memory is 'patchy'. However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  Explicit memory map is
2032 recommended for this case -- and target_read_memory_robust will
2033 take care of reading multiple ranges then. */
2034
static void
read_whatever_is_readable (struct target_ops *ops,
                           ULONGEST begin, ULONGEST end,
                           VEC(memory_read_result_s) **result)
{
  /* Scratch buffer covering the whole [BEGIN, END) request; on the
     "forward" success path its ownership transfers to the result.  */
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  FORWARD records which end is
     readable, and the corresponding single byte is already in BUF.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                           buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                                buf + (end-begin) - 1, end - 1, 1,
                                &xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      /* Neither end is readable; no subrange satisfies the contract
         described above, so return nothing.  */
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* The "first" half is always the half adjacent to the end already
         known readable, so the bisection converges on the boundary.  */
      if (forward)
        {
          first_half_begin = current_begin;
          first_half_end = middle;
          second_half_begin = middle;
          second_half_end = current_end;
        }
      else
        {
          first_half_begin = middle;
          first_half_end = current_end;
          second_half_begin = current_begin;
          second_half_end = middle;
        }

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                          buf + (first_half_begin - begin),
                          first_half_begin,
                          first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
        {
          /* This half reads up fine.  So, the error must be in the
             other half.  */
          current_begin = second_half_begin;
          current_end = second_half_end;
        }
      else
        {
          /* This half is not readable.  Because we've tried one byte, we
             know some part of this half is actually readable.  Go to the next
             iteration to divide again and try to read.

             We don't handle the other half, because this function only tries
             to read a single readable subrange.  */
          current_begin = first_half_begin;
          current_end = first_half_end;
        }
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  BUF itself
         becomes the result's data (no trailing shrink is done).  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy just that tail
         into a right-sized buffer and release the scratch buffer.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2148
2149 void
2150 free_memory_read_result_vector (void *x)
2151 {
2152 VEC(memory_read_result_s) *v = x;
2153 memory_read_result_s *current;
2154 int ix;
2155
2156 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2157 {
2158 xfree (current->data);
2159 }
2160 VEC_free (memory_read_result_s, v);
2161 }
2162
/* Read LEN bytes starting at OFFSET, splitting the request along the
   memory-region map so that unreadable regions are skipped and partial
   readability at region boundaries is salvaged via
   read_whatever_is_readable.  Returns a vector of the successfully
   read blocks; each element's data is heap-allocated (see
   free_memory_read_result_vector).  */

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      /* Bytes remaining in this region; hi == 0 means the region
         extends to the end of the address space.
         NOTE(review): for bounded regions this computes
         region->hi - offset rather than region->hi - (offset + xfered),
         which looks overstated once XFERED is nonzero -- confirm against
         lookup_mem_region's contract before changing.  */
      if (region->hi == 0)
        rlen = len - xfered;
      else
        rlen = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
        {
          /* Cannot read this region.  Note that we can end up here only
             if the region is explicitly marked inaccessible, or
             'inaccessible-by-default' is in effect.  */
          xfered += rlen;
        }
      else
        {
          LONGEST to_read = min (len - xfered, rlen);
          gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

          LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                                      (gdb_byte *) buffer,
                                      offset + xfered, to_read);
          /* Call an observer, notifying them of the xfer progress?  */
          if (xfer <= 0)
            {
              /* Got an error reading full chunk.  See if maybe we can read
                 some subrange.  BUFFER is unused on this path; the helper
                 allocates result storage itself.  */
              xfree (buffer);
              read_whatever_is_readable (ops, offset + xfered,
                                         offset + xfered + to_read, &result);
              xfered += to_read;
            }
          else
            {
              /* Full or partial success; ownership of BUFFER passes to
                 the result vector.  */
              struct memory_read_result r;
              r.data = buffer;
              r.begin = offset + xfered;
              r.end = r.begin + xfer;
              VEC_safe_push (memory_read_result_s, result, &r);
              xfered += xfer;
            }
          QUIT;
        }
    }
  return result;
}
2221
2222
2223 /* An alternative to target_write with progress callbacks. */
2224
2225 LONGEST
2226 target_write_with_progress (struct target_ops *ops,
2227 enum target_object object,
2228 const char *annex, const gdb_byte *buf,
2229 ULONGEST offset, LONGEST len,
2230 void (*progress) (ULONGEST, void *), void *baton)
2231 {
2232 LONGEST xfered = 0;
2233
2234 /* Give the progress callback a chance to set up. */
2235 if (progress)
2236 (*progress) (0, baton);
2237
2238 while (xfered < len)
2239 {
2240 ULONGEST xfered_len;
2241 enum target_xfer_status status;
2242
2243 status = target_write_partial (ops, object, annex,
2244 (gdb_byte *) buf + xfered,
2245 offset + xfered, len - xfered,
2246 &xfered_len);
2247
2248 if (status == TARGET_XFER_EOF)
2249 return xfered;
2250 if (TARGET_XFER_STATUS_ERROR_P (status))
2251 return -1;
2252
2253 gdb_assert (status == TARGET_XFER_OK);
2254 if (progress)
2255 (*progress) (xfered_len, baton);
2256
2257 xfered += xfered_len;
2258 QUIT;
2259 }
2260 return len;
2261 }
2262
2263 /* For docs on target_write see target.h. */
2264
2265 LONGEST
2266 target_write (struct target_ops *ops,
2267 enum target_object object,
2268 const char *annex, const gdb_byte *buf,
2269 ULONGEST offset, LONGEST len)
2270 {
2271 return target_write_with_progress (ops, object, annex, buf, offset, len,
2272 NULL, NULL);
2273 }
2274
2275 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2276 the size of the transferred data. PADDING additional bytes are
2277 available in *BUF_P. This is a helper function for
2278 target_read_alloc; see the declaration of that function for more
2279 information. */
2280
static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
                     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      /* PADDING bytes are held back so the caller always has that much
         headroom past the data (e.g. for a NUL terminator).  */
      status = target_read_partial (ops, object, annex, &buf[buf_pos],
                                    buf_pos, buf_alloc - buf_pos - padding,
                                    &xfered_len);

      if (status == TARGET_XFER_EOF)
        {
          /* Read all there was.  Note that *BUF_P is set only when at
             least one byte was read; on an empty object the buffer is
             freed and the caller must not look at *BUF_P.  */
          if (buf_pos == 0)
            xfree (buf);
          else
            *buf_p = buf;
          return buf_pos;
        }
      else if (status != TARGET_XFER_OK)
        {
          /* An error occurred.  */
          xfree (buf);
          return TARGET_XFER_E_IO;
        }

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
        {
          buf_alloc *= 2;
          buf = xrealloc (buf, buf_alloc);
        }

      QUIT;
    }
}
2337
2338 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2339 the size of the transferred data. See the declaration in "target.h"
2340 function for more information about the return value. */
2341
2342 LONGEST
2343 target_read_alloc (struct target_ops *ops, enum target_object object,
2344 const char *annex, gdb_byte **buf_p)
2345 {
2346 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2347 }
2348
2349 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2350 returned as a string, allocated using xmalloc. If an error occurs
2351 or the transfer is unsupported, NULL is returned. Empty objects
2352 are returned as allocated but empty strings. A warning is issued
2353 if the result contains any embedded NUL bytes. */
2354
2355 char *
2356 target_read_stralloc (struct target_ops *ops, enum target_object object,
2357 const char *annex)
2358 {
2359 gdb_byte *buffer;
2360 char *bufstr;
2361 LONGEST i, transferred;
2362
2363 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2364 bufstr = (char *) buffer;
2365
2366 if (transferred < 0)
2367 return NULL;
2368
2369 if (transferred == 0)
2370 return xstrdup ("");
2371
2372 bufstr[transferred] = 0;
2373
2374 /* Check for embedded NUL bytes; but allow trailing NULs. */
2375 for (i = strlen (bufstr); i < transferred; i++)
2376 if (bufstr[i] != 0)
2377 {
2378 warning (_("target object %d, annex %s, "
2379 "contained unexpected null characters"),
2380 (int) object, annex ? annex : "(none)");
2381 break;
2382 }
2383
2384 return bufstr;
2385 }
2386
2387 /* Memory transfer methods. */
2388
2389 void
2390 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2391 LONGEST len)
2392 {
2393 /* This method is used to read from an alternate, non-current
2394 target. This read must bypass the overlay support (as symbols
2395 don't match this target), and GDB's internal cache (wrong cache
2396 for this target). */
2397 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2398 != len)
2399 memory_error (TARGET_XFER_E_IO, addr);
2400 }
2401
2402 ULONGEST
2403 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2404 int len, enum bfd_endian byte_order)
2405 {
2406 gdb_byte buf[sizeof (ULONGEST)];
2407
2408 gdb_assert (len <= sizeof (buf));
2409 get_target_memory (ops, addr, buf, len);
2410 return extract_unsigned_integer (buf, len, byte_order);
2411 }
2412
2413 /* See target.h. */
2414
2415 int
2416 target_insert_breakpoint (struct gdbarch *gdbarch,
2417 struct bp_target_info *bp_tgt)
2418 {
2419 if (!may_insert_breakpoints)
2420 {
2421 warning (_("May not insert breakpoints"));
2422 return 1;
2423 }
2424
2425 return current_target.to_insert_breakpoint (&current_target,
2426 gdbarch, bp_tgt);
2427 }
2428
2429 /* See target.h. */
2430
2431 int
2432 target_remove_breakpoint (struct gdbarch *gdbarch,
2433 struct bp_target_info *bp_tgt)
2434 {
2435 /* This is kind of a weird case to handle, but the permission might
2436 have been changed after breakpoints were inserted - in which case
2437 we should just take the user literally and assume that any
2438 breakpoints should be left in place. */
2439 if (!may_insert_breakpoints)
2440 {
2441 warning (_("May not remove breakpoints"));
2442 return 1;
2443 }
2444
2445 return current_target.to_remove_breakpoint (&current_target,
2446 gdbarch, bp_tgt);
2447 }
2448
2449 static void
2450 target_info (char *args, int from_tty)
2451 {
2452 struct target_ops *t;
2453 int has_all_mem = 0;
2454
2455 if (symfile_objfile != NULL)
2456 printf_unfiltered (_("Symbols from \"%s\".\n"),
2457 objfile_name (symfile_objfile));
2458
2459 for (t = target_stack; t != NULL; t = t->beneath)
2460 {
2461 if (!(*t->to_has_memory) (t))
2462 continue;
2463
2464 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2465 continue;
2466 if (has_all_mem)
2467 printf_unfiltered (_("\tWhile running this, "
2468 "GDB does not access memory from...\n"));
2469 printf_unfiltered ("%s:\n", t->to_longname);
2470 (t->to_files_info) (t);
2471 has_all_mem = (*t->to_has_all_memory) (t);
2472 }
2473 }
2474
2475 /* This function is called before any new inferior is created, e.g.
2476 by running a program, attaching, or connecting to a target.
2477 It cleans up any state from previous invocations which might
2478 change between runs. This is a subset of what target_preopen
2479 resets (things which might change between targets). */
2480
void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  In that case none of the per-inferior
     state below should be discarded.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* The in-process agent's capabilities must be re-probed for the new
     inferior regardless.  */
  agent_capability_invalidate ();
}
2515
2516 /* Callback for iterate_over_inferiors. Gets rid of the given
2517 inferior. */
2518
2519 static int
2520 dispose_inferior (struct inferior *inf, void *args)
2521 {
2522 struct thread_info *thread;
2523
2524 thread = any_thread_of_process (inf->pid);
2525 if (thread)
2526 {
2527 switch_to_thread (thread->ptid);
2528
2529 /* Core inferiors actually should be detached, not killed. */
2530 if (target_has_execution)
2531 target_kill ();
2532 else
2533 target_detach (NULL, 0);
2534 }
2535
2536 return 0;
2537 }
2538
2539 /* This is to be called by the open routine before it does
2540 anything. */
2541
2542 void
2543 target_preopen (int from_tty)
2544 {
2545 dont_repeat ();
2546
2547 if (have_inferiors ())
2548 {
2549 if (!from_tty
2550 || !have_live_inferiors ()
2551 || query (_("A program is being debugged already. Kill it? ")))
2552 iterate_over_inferiors (dispose_inferior, NULL);
2553 else
2554 error (_("Program not killed."));
2555 }
2556
2557 /* Calling target_kill may remove the target from the stack. But if
2558 it doesn't (which seems like a win for UDI), remove it now. */
2559 /* Leave the exec target, though. The user may be switching from a
2560 live process to a core of the same program. */
2561 pop_all_targets_above (file_stratum);
2562
2563 target_pre_inferior (from_tty);
2564 }
2565
2566 /* Detach a target after doing deferred register stores. */
2567
2568 void
2569 target_detach (const char *args, int from_tty)
2570 {
2571 struct target_ops* t;
2572
2573 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2574 /* Don't remove global breakpoints here. They're removed on
2575 disconnection from the target. */
2576 ;
2577 else
2578 /* If we're in breakpoints-always-inserted mode, have to remove
2579 them before detaching. */
2580 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2581
2582 prepare_for_detach ();
2583
2584 current_target.to_detach (&current_target, args, from_tty);
2585 if (targetdebug)
2586 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2587 args, from_tty);
2588 }
2589
2590 void
2591 target_disconnect (char *args, int from_tty)
2592 {
2593 struct target_ops *t;
2594
2595 /* If we're in breakpoints-always-inserted mode or if breakpoints
2596 are global across processes, we have to remove them before
2597 disconnecting. */
2598 remove_breakpoints ();
2599
2600 for (t = current_target.beneath; t != NULL; t = t->beneath)
2601 if (t->to_disconnect != NULL)
2602 {
2603 if (targetdebug)
2604 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2605 args, from_tty);
2606 t->to_disconnect (t, args, from_tty);
2607 return;
2608 }
2609
2610 tcomplain ();
2611 }
2612
2613 ptid_t
2614 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2615 {
2616 struct target_ops *t;
2617 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2618 status, options);
2619
2620 if (targetdebug)
2621 {
2622 char *status_string;
2623 char *options_string;
2624
2625 status_string = target_waitstatus_to_string (status);
2626 options_string = target_options_to_string (options);
2627 fprintf_unfiltered (gdb_stdlog,
2628 "target_wait (%d, status, options={%s})"
2629 " = %d, %s\n",
2630 ptid_get_pid (ptid), options_string,
2631 ptid_get_pid (retval), status_string);
2632 xfree (status_string);
2633 xfree (options_string);
2634 }
2635
2636 return retval;
2637 }
2638
2639 char *
2640 target_pid_to_str (ptid_t ptid)
2641 {
2642 struct target_ops *t;
2643
2644 for (t = current_target.beneath; t != NULL; t = t->beneath)
2645 {
2646 if (t->to_pid_to_str != NULL)
2647 return (*t->to_pid_to_str) (t, ptid);
2648 }
2649
2650 return normal_pid_to_str (ptid);
2651 }
2652
2653 char *
2654 target_thread_name (struct thread_info *info)
2655 {
2656 return current_target.to_thread_name (&current_target, info);
2657 }
2658
2659 void
2660 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2661 {
2662 struct target_ops *t;
2663
2664 target_dcache_invalidate ();
2665
2666 current_target.to_resume (&current_target, ptid, step, signal);
2667 if (targetdebug)
2668 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2669 ptid_get_pid (ptid),
2670 step ? "step" : "continue",
2671 gdb_signal_to_name (signal));
2672
2673 registers_changed_ptid (ptid);
2674 set_executing (ptid, 1);
2675 set_running (ptid, 1);
2676 clear_inline_frame_state (ptid);
2677 }
2678
2679 void
2680 target_pass_signals (int numsigs, unsigned char *pass_signals)
2681 {
2682 struct target_ops *t;
2683
2684 for (t = current_target.beneath; t != NULL; t = t->beneath)
2685 {
2686 if (t->to_pass_signals != NULL)
2687 {
2688 if (targetdebug)
2689 {
2690 int i;
2691
2692 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2693 numsigs);
2694
2695 for (i = 0; i < numsigs; i++)
2696 if (pass_signals[i])
2697 fprintf_unfiltered (gdb_stdlog, " %s",
2698 gdb_signal_to_name (i));
2699
2700 fprintf_unfiltered (gdb_stdlog, " })\n");
2701 }
2702
2703 (*t->to_pass_signals) (t, numsigs, pass_signals);
2704 return;
2705 }
2706 }
2707 }
2708
2709 void
2710 target_program_signals (int numsigs, unsigned char *program_signals)
2711 {
2712 struct target_ops *t;
2713
2714 for (t = current_target.beneath; t != NULL; t = t->beneath)
2715 {
2716 if (t->to_program_signals != NULL)
2717 {
2718 if (targetdebug)
2719 {
2720 int i;
2721
2722 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2723 numsigs);
2724
2725 for (i = 0; i < numsigs; i++)
2726 if (program_signals[i])
2727 fprintf_unfiltered (gdb_stdlog, " %s",
2728 gdb_signal_to_name (i));
2729
2730 fprintf_unfiltered (gdb_stdlog, " })\n");
2731 }
2732
2733 (*t->to_program_signals) (t, numsigs, program_signals);
2734 return;
2735 }
2736 }
2737 }
2738
2739 /* Look through the list of possible targets for a target that can
2740 follow forks. */
2741
2742 int
2743 target_follow_fork (int follow_child, int detach_fork)
2744 {
2745 struct target_ops *t;
2746
2747 for (t = current_target.beneath; t != NULL; t = t->beneath)
2748 {
2749 if (t->to_follow_fork != NULL)
2750 {
2751 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2752
2753 if (targetdebug)
2754 fprintf_unfiltered (gdb_stdlog,
2755 "target_follow_fork (%d, %d) = %d\n",
2756 follow_child, detach_fork, retval);
2757 return retval;
2758 }
2759 }
2760
2761 /* Some target returned a fork event, but did not know how to follow it. */
2762 internal_error (__FILE__, __LINE__,
2763 _("could not find a target to follow fork"));
2764 }
2765
2766 void
2767 target_mourn_inferior (void)
2768 {
2769 struct target_ops *t;
2770
2771 for (t = current_target.beneath; t != NULL; t = t->beneath)
2772 {
2773 if (t->to_mourn_inferior != NULL)
2774 {
2775 t->to_mourn_inferior (t);
2776 if (targetdebug)
2777 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2778
2779 /* We no longer need to keep handles on any of the object files.
2780 Make sure to release them to avoid unnecessarily locking any
2781 of them while we're not actually debugging. */
2782 bfd_cache_close_all ();
2783
2784 return;
2785 }
2786 }
2787
2788 internal_error (__FILE__, __LINE__,
2789 _("could not find a target to follow mourn inferior"));
2790 }
2791
2792 /* Look for a target which can describe architectural features, starting
2793 from TARGET. If we find one, return its description. */
2794
2795 const struct target_desc *
2796 target_read_description (struct target_ops *target)
2797 {
2798 struct target_ops *t;
2799
2800 for (t = target; t != NULL; t = t->beneath)
2801 if (t->to_read_description != NULL)
2802 {
2803 const struct target_desc *tdesc;
2804
2805 tdesc = t->to_read_description (t);
2806 if (tdesc)
2807 return tdesc;
2808 }
2809
2810 return NULL;
2811 }
2812
2813 /* The default implementation of to_search_memory.
2814 This implements a basic search of memory, reading target memory and
2815 performing the search here (as opposed to performing the search in on the
2816 target side with, for example, gdbserver). */
2817
int
simple_search_memory (struct target_ops *ops,
                      CORE_ADDR start_addr, ULONGEST search_space_len,
                      const gdb_byte *pattern, ULONGEST pattern_len,
                      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* The extra pattern_len - 1 bytes let a match straddle two chunks.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* NOTE(review): plain malloc (not xmalloc) appears deliberate here,
     so that an allocation failure produces this specific error rather
     than the generic out-of-memory path -- confirm.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
                 "memory at %s, halting search."),
               pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
                          pattern, pattern_len);

      if (found_ptr != NULL)
        {
          /* SEARCH_BUF mirrors memory starting at START_ADDR, so the
             offset within the buffer is the offset within memory.  */
          CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

          *found_addrp = found_addr;
          do_cleanups (old_cleanups);
          return 1;
        }

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
        search_space_len -= chunk_size;
      else
        search_space_len = 0;

      if (search_space_len >= pattern_len)
        {
          unsigned keep_len = search_buf_size - chunk_size;
          CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
          int nr_to_read;

          /* Copy the trailing part of the previous iteration to the front
             of the buffer for the next iteration.  */
          gdb_assert (keep_len == pattern_len - 1);
          memcpy (search_buf, search_buf + chunk_size, keep_len);

          nr_to_read = min (search_space_len - keep_len, chunk_size);

          if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                           search_buf + keep_len, read_addr,
                           nr_to_read) != nr_to_read)
            {
              warning (_("Unable to access %s bytes of target "
                         "memory at %s, halting search."),
                       plongest (nr_to_read),
                       hex_string (read_addr));
              do_cleanups (old_cleanups);
              return -1;
            }

          /* SEARCH_BUF now reflects memory starting one chunk later.  */
          start_addr += chunk_size;
        }
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2920
2921 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2922 sequence of bytes in PATTERN with length PATTERN_LEN.
2923
2924 The result is 1 if found, 0 if not found, and -1 if there was an error
2925 requiring halting of the search (e.g. memory read error).
2926 If the pattern is found the address is recorded in FOUND_ADDRP. */
2927
2928 int
2929 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2930 const gdb_byte *pattern, ULONGEST pattern_len,
2931 CORE_ADDR *found_addrp)
2932 {
2933 struct target_ops *t;
2934 int found;
2935
2936 /* We don't use INHERIT to set current_target.to_search_memory,
2937 so we have to scan the target stack and handle targetdebug
2938 ourselves. */
2939
2940 if (targetdebug)
2941 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2942 hex_string (start_addr));
2943
2944 for (t = current_target.beneath; t != NULL; t = t->beneath)
2945 if (t->to_search_memory != NULL)
2946 break;
2947
2948 if (t != NULL)
2949 {
2950 found = t->to_search_memory (t, start_addr, search_space_len,
2951 pattern, pattern_len, found_addrp);
2952 }
2953 else
2954 {
2955 /* If a special version of to_search_memory isn't available, use the
2956 simple version. */
2957 found = simple_search_memory (current_target.beneath,
2958 start_addr, search_space_len,
2959 pattern, pattern_len, found_addrp);
2960 }
2961
2962 if (targetdebug)
2963 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2964
2965 return found;
2966 }
2967
2968 /* Look through the currently pushed targets. If none of them will
2969 be able to restart the currently running process, issue an error
2970 message. */
2971
2972 void
2973 target_require_runnable (void)
2974 {
2975 struct target_ops *t;
2976
2977 for (t = target_stack; t != NULL; t = t->beneath)
2978 {
2979 /* If this target knows how to create a new program, then
2980 assume we will still be able to after killing the current
2981 one. Either killing and mourning will not pop T, or else
2982 find_default_run_target will find it again. */
2983 if (t->to_create_inferior != NULL)
2984 return;
2985
2986 /* Do not worry about thread_stratum targets that can not
2987 create inferiors. Assume they will be pushed again if
2988 necessary, and continue to the process_stratum. */
2989 if (t->to_stratum == thread_stratum
2990 || t->to_stratum == arch_stratum)
2991 continue;
2992
2993 error (_("The \"%s\" target does not support \"run\". "
2994 "Try \"help target\" or \"continue\"."),
2995 t->to_shortname);
2996 }
2997
2998 /* This function is only called if the target is running. In that
2999 case there should have been a process_stratum target and it
3000 should either know how to create inferiors, or not... */
3001 internal_error (__FILE__, __LINE__, _("No targets found"));
3002 }
3003
3004 /* Look through the list of possible targets for a target that can
3005 execute a run or attach command without any other data. This is
3006 used to locate the default process stratum.
3007
3008 If DO_MESG is not NULL, the result is always valid (error() is
3009 called for errors); else, return NULL on error. */
3010
3011 static struct target_ops *
3012 find_default_run_target (char *do_mesg)
3013 {
3014 struct target_ops **t;
3015 struct target_ops *runable = NULL;
3016 int count;
3017
3018 count = 0;
3019
3020 for (t = target_structs; t < target_structs + target_struct_size;
3021 ++t)
3022 {
3023 if ((*t)->to_can_run && target_can_run (*t))
3024 {
3025 runable = *t;
3026 ++count;
3027 }
3028 }
3029
3030 if (count != 1)
3031 {
3032 if (do_mesg)
3033 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3034 else
3035 return NULL;
3036 }
3037
3038 return runable;
3039 }
3040
3041 void
3042 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3043 {
3044 struct target_ops *t;
3045
3046 t = find_default_run_target ("attach");
3047 (t->to_attach) (t, args, from_tty);
3048 return;
3049 }
3050
3051 void
3052 find_default_create_inferior (struct target_ops *ops,
3053 char *exec_file, char *allargs, char **env,
3054 int from_tty)
3055 {
3056 struct target_ops *t;
3057
3058 t = find_default_run_target ("run");
3059 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3060 return;
3061 }
3062
3063 static int
3064 find_default_can_async_p (struct target_ops *ignore)
3065 {
3066 struct target_ops *t;
3067
3068 /* This may be called before the target is pushed on the stack;
3069 look for the default process stratum. If there's none, gdb isn't
3070 configured with a native debugger, and target remote isn't
3071 connected yet. */
3072 t = find_default_run_target (NULL);
3073 if (t && t->to_can_async_p != delegate_can_async_p)
3074 return (t->to_can_async_p) (t);
3075 return 0;
3076 }
3077
3078 static int
3079 find_default_is_async_p (struct target_ops *ignore)
3080 {
3081 struct target_ops *t;
3082
3083 /* This may be called before the target is pushed on the stack;
3084 look for the default process stratum. If there's none, gdb isn't
3085 configured with a native debugger, and target remote isn't
3086 connected yet. */
3087 t = find_default_run_target (NULL);
3088 if (t && t->to_is_async_p != delegate_is_async_p)
3089 return (t->to_is_async_p) (t);
3090 return 0;
3091 }
3092
3093 static int
3094 find_default_supports_non_stop (struct target_ops *self)
3095 {
3096 struct target_ops *t;
3097
3098 t = find_default_run_target (NULL);
3099 if (t && t->to_supports_non_stop)
3100 return (t->to_supports_non_stop) (t);
3101 return 0;
3102 }
3103
3104 int
3105 target_supports_non_stop (void)
3106 {
3107 struct target_ops *t;
3108
3109 for (t = &current_target; t != NULL; t = t->beneath)
3110 if (t->to_supports_non_stop)
3111 return t->to_supports_non_stop (t);
3112
3113 return 0;
3114 }
3115
3116 /* Implement the "info proc" command. */
3117
3118 int
3119 target_info_proc (char *args, enum info_proc_what what)
3120 {
3121 struct target_ops *t;
3122
3123 /* If we're already connected to something that can get us OS
3124 related data, use it. Otherwise, try using the native
3125 target. */
3126 if (current_target.to_stratum >= process_stratum)
3127 t = current_target.beneath;
3128 else
3129 t = find_default_run_target (NULL);
3130
3131 for (; t != NULL; t = t->beneath)
3132 {
3133 if (t->to_info_proc != NULL)
3134 {
3135 t->to_info_proc (t, args, what);
3136
3137 if (targetdebug)
3138 fprintf_unfiltered (gdb_stdlog,
3139 "target_info_proc (\"%s\", %d)\n", args, what);
3140
3141 return 1;
3142 }
3143 }
3144
3145 return 0;
3146 }
3147
3148 static int
3149 find_default_supports_disable_randomization (struct target_ops *self)
3150 {
3151 struct target_ops *t;
3152
3153 t = find_default_run_target (NULL);
3154 if (t && t->to_supports_disable_randomization)
3155 return (t->to_supports_disable_randomization) (t);
3156 return 0;
3157 }
3158
3159 int
3160 target_supports_disable_randomization (void)
3161 {
3162 struct target_ops *t;
3163
3164 for (t = &current_target; t != NULL; t = t->beneath)
3165 if (t->to_supports_disable_randomization)
3166 return t->to_supports_disable_randomization (t);
3167
3168 return 0;
3169 }
3170
3171 char *
3172 target_get_osdata (const char *type)
3173 {
3174 struct target_ops *t;
3175
3176 /* If we're already connected to something that can get us OS
3177 related data, use it. Otherwise, try using the native
3178 target. */
3179 if (current_target.to_stratum >= process_stratum)
3180 t = current_target.beneath;
3181 else
3182 t = find_default_run_target ("get OS data");
3183
3184 if (!t)
3185 return NULL;
3186
3187 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3188 }
3189
3190 /* Determine the current address space of thread PTID. */
3191
3192 struct address_space *
3193 target_thread_address_space (ptid_t ptid)
3194 {
3195 struct address_space *aspace;
3196 struct inferior *inf;
3197 struct target_ops *t;
3198
3199 for (t = current_target.beneath; t != NULL; t = t->beneath)
3200 {
3201 if (t->to_thread_address_space != NULL)
3202 {
3203 aspace = t->to_thread_address_space (t, ptid);
3204 gdb_assert (aspace);
3205
3206 if (targetdebug)
3207 fprintf_unfiltered (gdb_stdlog,
3208 "target_thread_address_space (%s) = %d\n",
3209 target_pid_to_str (ptid),
3210 address_space_num (aspace));
3211 return aspace;
3212 }
3213 }
3214
3215 /* Fall-back to the "main" address space of the inferior. */
3216 inf = find_inferior_pid (ptid_get_pid (ptid));
3217
3218 if (inf == NULL || inf->aspace == NULL)
3219 internal_error (__FILE__, __LINE__,
3220 _("Can't determine the current "
3221 "address space of thread %s\n"),
3222 target_pid_to_str (ptid));
3223
3224 return inf->aspace;
3225 }
3226
3227
3228 /* Target file operations. */
3229
3230 static struct target_ops *
3231 default_fileio_target (void)
3232 {
3233 /* If we're already connected to something that can perform
3234 file I/O, use it. Otherwise, try using the native target. */
3235 if (current_target.to_stratum >= process_stratum)
3236 return current_target.beneath;
3237 else
3238 return find_default_run_target ("file I/O");
3239 }
3240
3241 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3242 target file descriptor, or -1 if an error occurs (and set
3243 *TARGET_ERRNO). */
3244 int
3245 target_fileio_open (const char *filename, int flags, int mode,
3246 int *target_errno)
3247 {
3248 struct target_ops *t;
3249
3250 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3251 {
3252 if (t->to_fileio_open != NULL)
3253 {
3254 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
3255
3256 if (targetdebug)
3257 fprintf_unfiltered (gdb_stdlog,
3258 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3259 filename, flags, mode,
3260 fd, fd != -1 ? 0 : *target_errno);
3261 return fd;
3262 }
3263 }
3264
3265 *target_errno = FILEIO_ENOSYS;
3266 return -1;
3267 }
3268
3269 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3270 Return the number of bytes written, or -1 if an error occurs
3271 (and set *TARGET_ERRNO). */
3272 int
3273 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3274 ULONGEST offset, int *target_errno)
3275 {
3276 struct target_ops *t;
3277
3278 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3279 {
3280 if (t->to_fileio_pwrite != NULL)
3281 {
3282 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
3283 target_errno);
3284
3285 if (targetdebug)
3286 fprintf_unfiltered (gdb_stdlog,
3287 "target_fileio_pwrite (%d,...,%d,%s) "
3288 "= %d (%d)\n",
3289 fd, len, pulongest (offset),
3290 ret, ret != -1 ? 0 : *target_errno);
3291 return ret;
3292 }
3293 }
3294
3295 *target_errno = FILEIO_ENOSYS;
3296 return -1;
3297 }
3298
/* Read up to LEN bytes from FD on the target into READ_BUF.
   Return the number of bytes read, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
		     ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target in the stack, starting at the
     default file-I/O target, that implements to_fileio_pread.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pread != NULL)
	{
	  int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
					target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pread (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target in the stack implements file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3328
3329 /* Close FD on the target. Return 0, or -1 if an error occurs
3330 (and set *TARGET_ERRNO). */
3331 int
3332 target_fileio_close (int fd, int *target_errno)
3333 {
3334 struct target_ops *t;
3335
3336 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3337 {
3338 if (t->to_fileio_close != NULL)
3339 {
3340 int ret = t->to_fileio_close (t, fd, target_errno);
3341
3342 if (targetdebug)
3343 fprintf_unfiltered (gdb_stdlog,
3344 "target_fileio_close (%d) = %d (%d)\n",
3345 fd, ret, ret != -1 ? 0 : *target_errno);
3346 return ret;
3347 }
3348 }
3349
3350 *target_errno = FILEIO_ENOSYS;
3351 return -1;
3352 }
3353
3354 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3355 occurs (and set *TARGET_ERRNO). */
3356 int
3357 target_fileio_unlink (const char *filename, int *target_errno)
3358 {
3359 struct target_ops *t;
3360
3361 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3362 {
3363 if (t->to_fileio_unlink != NULL)
3364 {
3365 int ret = t->to_fileio_unlink (t, filename, target_errno);
3366
3367 if (targetdebug)
3368 fprintf_unfiltered (gdb_stdlog,
3369 "target_fileio_unlink (%s) = %d (%d)\n",
3370 filename, ret, ret != -1 ? 0 : *target_errno);
3371 return ret;
3372 }
3373 }
3374
3375 *target_errno = FILEIO_ENOSYS;
3376 return -1;
3377 }
3378
3379 /* Read value of symbolic link FILENAME on the target. Return a
3380 null-terminated string allocated via xmalloc, or NULL if an error
3381 occurs (and set *TARGET_ERRNO). */
3382 char *
3383 target_fileio_readlink (const char *filename, int *target_errno)
3384 {
3385 struct target_ops *t;
3386
3387 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3388 {
3389 if (t->to_fileio_readlink != NULL)
3390 {
3391 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3392
3393 if (targetdebug)
3394 fprintf_unfiltered (gdb_stdlog,
3395 "target_fileio_readlink (%s) = %s (%d)\n",
3396 filename, ret? ret : "(nil)",
3397 ret? 0 : *target_errno);
3398 return ret;
3399 }
3400 }
3401
3402 *target_errno = FILEIO_ENOSYS;
3403 return NULL;
3404 }
3405
/* Cleanup callback that closes a target file descriptor.  OPAQUE
   points at the int holding the descriptor; any close error is
   ignored.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int target_errno;

  target_fileio_close (*(int *) opaque, &target_errno);
}
3414
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.

   On failure -1 is returned and *BUF_P is untouched; on success the
   caller owns *BUF_P and must xfree it.  For an empty file, 0 is
   returned and *BUF_P is left unset (no allocation survives).  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;	/* Buffer capacity / bytes read so far.  */
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Ensure FD is closed on every exit path, including a QUIT thrown
     from the loop below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Read into the tail of the buffer, always leaving PADDING
	 spare bytes at the end for the caller (e.g. a NUL).  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
3478
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* No padding: the caller gets exactly the file contents.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3488
3489 /* Read target file FILENAME. The result is NUL-terminated and
3490 returned as a string, allocated using xmalloc. If an error occurs
3491 or the transfer is unsupported, NULL is returned. Empty objects
3492 are returned as allocated but empty strings. A warning is issued
3493 if the result contains any embedded NUL bytes. */
3494
3495 char *
3496 target_fileio_read_stralloc (const char *filename)
3497 {
3498 gdb_byte *buffer;
3499 char *bufstr;
3500 LONGEST i, transferred;
3501
3502 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3503 bufstr = (char *) buffer;
3504
3505 if (transferred < 0)
3506 return NULL;
3507
3508 if (transferred == 0)
3509 return xstrdup ("");
3510
3511 bufstr[transferred] = 0;
3512
3513 /* Check for embedded NUL bytes; but allow trailing NULs. */
3514 for (i = strlen (bufstr); i < transferred; i++)
3515 if (bufstr[i] != 0)
3516 {
3517 warning (_("target file %s "
3518 "contained unexpected null characters"),
3519 filename);
3520 break;
3521 }
3522
3523 return bufstr;
3524 }
3525
3526
3527 static int
3528 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3529 CORE_ADDR addr, int len)
3530 {
3531 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3532 }
3533
3534 static int
3535 default_watchpoint_addr_within_range (struct target_ops *target,
3536 CORE_ADDR addr,
3537 CORE_ADDR start, int length)
3538 {
3539 return addr >= start && addr < start + length;
3540 }
3541
/* Default implementation of to_thread_architecture: every thread is
   assumed to use the architecture of the current target.  */
static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}
3547
/* Stub method returning 0; cast to the required signature where
   installed in a target vector (e.g. the to_has_* slots set up in
   init_dummy_target).  */
static int
return_zero (void)
{
  return 0;
}
3553
/* Stub method returning -1; cast to the required signature where a
   target-vector slot should report failure/no-support.  NOTE(review):
   no user is visible in this chunk -- confirm usage elsewhere.  */
static int
return_minus_one (void)
{
  return -1;
}
3559
/* Stub method returning a null pointer; cast to the required
   pointer-returning signature where installed.  NOTE(review): no user
   is visible in this chunk -- confirm usage elsewhere.  */
static void *
return_null (void)
{
  return 0;
}
3565
3566 /*
3567 * Find the next target down the stack from the specified target.
3568 */
3569
3570 struct target_ops *
3571 find_target_beneath (struct target_ops *t)
3572 {
3573 return t->beneath;
3574 }
3575
3576 /* See target.h. */
3577
3578 struct target_ops *
3579 find_target_at (enum strata stratum)
3580 {
3581 struct target_ops *t;
3582
3583 for (t = current_target.beneath; t != NULL; t = t->beneath)
3584 if (t->to_stratum == stratum)
3585 return t;
3586
3587 return NULL;
3588 }
3589
3590 \f
/* The inferior process has died.  Long live the inferior!  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Remember which inferior died, then clear the global so the
     teardown below runs with no inferior selected.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  /* Cached register contents are no longer valid.  */
  registers_changed ();

  /* Re-open the exec file and discard cached frames; presumably the
     executable may have changed while the inferior ran.  */
  reopen_exec_file ();
  reinit_frame_cache ();

  /* Give the interface (e.g. a GUI) a chance to react.  */
  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3625 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  /* The buffer is static: each call overwrites the previous result,
     so callers must copy the string to keep it.  */
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3637
/* Implementation of to_pid_to_str for the dummy target: use the
   plain "process NNN" representation.  */
static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3643
/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  /* error presumably throws, so the return below is never reached.  */
  error (_("Command not implemented for this target."));
  return 0;
}
3652
/* Error-catcher for target_make_corefile_notes.  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  /* error presumably throws, so the return below is never reached.  */
  error (_("Command not implemented for this target."));
  return NULL;
}
3661
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  /* Defer inferior creation to the find_default_* helpers.  */
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  /* With no inferior, the dummy target has nothing: all the to_has_*
     predicates report false.  The casts adapt return_zero to the
     exact method signatures.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining slot with a default method.  */
  install_dummy_methods (&dummy_target);
}
3687 \f
/* Debug wrapper for to_open: invoke the real target's to_open, then
   log the call to gdb_stdlog.  */
static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3695
/* Close target TARG, preferring its to_xclose method and falling
   back on to_close.  TARG must already have been unpushed.  */
void
target_close (struct target_ops *targ)
{
  /* A target must be unpushed before it can be closed.  */
  gdb_assert (!target_is_pushed (targ));

  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3709
/* Attach to the process described by ARGS via the current target's
   to_attach method, logging the call when target debugging is on.  */
void
target_attach (char *args, int from_tty)
{
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
			args, from_tty);
}
3718
3719 int
3720 target_thread_alive (ptid_t ptid)
3721 {
3722 struct target_ops *t;
3723
3724 for (t = current_target.beneath; t != NULL; t = t->beneath)
3725 {
3726 if (t->to_thread_alive != NULL)
3727 {
3728 int retval;
3729
3730 retval = t->to_thread_alive (t, ptid);
3731 if (targetdebug)
3732 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3733 ptid_get_pid (ptid), retval);
3734
3735 return retval;
3736 }
3737 }
3738
3739 return 0;
3740 }
3741
3742 void
3743 target_find_new_threads (void)
3744 {
3745 struct target_ops *t;
3746
3747 for (t = current_target.beneath; t != NULL; t = t->beneath)
3748 {
3749 if (t->to_find_new_threads != NULL)
3750 {
3751 t->to_find_new_threads (t);
3752 if (targetdebug)
3753 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3754
3755 return;
3756 }
3757 }
3758 }
3759
3760 void
3761 target_stop (ptid_t ptid)
3762 {
3763 if (!may_stop)
3764 {
3765 warning (_("May not interrupt or stop the target, ignoring attempt"));
3766 return;
3767 }
3768
3769 (*current_target.to_stop) (&current_target, ptid);
3770 }
3771
/* Debug wrapper for to_post_attach: delegate to the real target,
   then log the call.  */
static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3779
3780 /* Concatenate ELEM to LIST, a comma separate list, and return the
3781 result. The LIST incoming argument is released. */
3782
3783 static char *
3784 str_comma_list_concat_elem (char *list, const char *elem)
3785 {
3786 if (list == NULL)
3787 return xstrdup (elem);
3788 else
3789 return reconcat (list, list, ", ", elem, (char *) NULL);
3790 }
3791
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  if ((*target_options & opt) == 0)
    return ret;

  *target_options &= ~opt;
  return str_comma_list_concat_elem (ret, opt_str);
}
3809
/* Return a comma-separated string naming the option bits set in
   TARGET_OPTIONS.  Unrecognized bits are reported as "unknown???".
   The result is heap-allocated (xstrdup/reconcat); caller frees.  */
char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

  /* Append #OPT to RET if OPT is set, and clear it from
     TARGET_OPTIONS so leftovers can be detected below.  */
#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  /* Any bits still set were not handled above.  */
  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3827
/* Print a target-debug trace line for a register access: FUNC, the
   register's name (or number), and for registers in the raw range
   the collected bytes plus, when the value fits in a LONGEST, its
   hex and decimal renderings.  */
static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when one exists.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  /* Only registers below gdbarch_num_regs can be raw-collected.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* If the value fits in a LONGEST, print it numerically too.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3864
3865 void
3866 target_fetch_registers (struct regcache *regcache, int regno)
3867 {
3868 struct target_ops *t;
3869
3870 for (t = current_target.beneath; t != NULL; t = t->beneath)
3871 {
3872 if (t->to_fetch_registers != NULL)
3873 {
3874 t->to_fetch_registers (t, regcache, regno);
3875 if (targetdebug)
3876 debug_print_register ("target_fetch_registers", regcache, regno);
3877 return;
3878 }
3879 }
3880 }
3881
3882 void
3883 target_store_registers (struct regcache *regcache, int regno)
3884 {
3885 struct target_ops *t;
3886
3887 if (!may_write_registers)
3888 error (_("Writing to registers is not allowed (regno %d)"), regno);
3889
3890 current_target.to_store_registers (&current_target, regcache, regno);
3891 if (targetdebug)
3892 {
3893 debug_print_register ("target_store_registers", regcache, regno);
3894 }
3895 }
3896
3897 int
3898 target_core_of_thread (ptid_t ptid)
3899 {
3900 struct target_ops *t;
3901
3902 for (t = current_target.beneath; t != NULL; t = t->beneath)
3903 {
3904 if (t->to_core_of_thread != NULL)
3905 {
3906 int retval = t->to_core_of_thread (t, ptid);
3907
3908 if (targetdebug)
3909 fprintf_unfiltered (gdb_stdlog,
3910 "target_core_of_thread (%d) = %d\n",
3911 ptid_get_pid (ptid), retval);
3912 return retval;
3913 }
3914 }
3915
3916 return -1;
3917 }
3918
/* Compare SIZE bytes at DATA against target memory starting at
   MEMADDR, delegating to the first target implementing
   to_verify_memory.  If no target does, tcomplain reports the lack of
   support; it presumably does not return, since control would
   otherwise fall off the end of this int function -- NOTE(review):
   confirm tcomplain throws.  */
int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_verify_memory != NULL)
	{
	  int retval = t->to_verify_memory (t, data, memaddr, size);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_verify_memory (%s, %s) = %d\n",
				paddress (target_gdbarch (), memaddr),
				pulongest (size),
				retval);
	  return retval;
	}
    }

  tcomplain ();
}
3942
3943 /* The documentation for this function is in its prototype declaration in
3944 target.h. */
3945
3946 int
3947 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3948 {
3949 struct target_ops *t;
3950
3951 for (t = current_target.beneath; t != NULL; t = t->beneath)
3952 if (t->to_insert_mask_watchpoint != NULL)
3953 {
3954 int ret;
3955
3956 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
3957
3958 if (targetdebug)
3959 fprintf_unfiltered (gdb_stdlog, "\
3960 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3961 core_addr_to_string (addr),
3962 core_addr_to_string (mask), rw, ret);
3963
3964 return ret;
3965 }
3966
3967 return 1;
3968 }
3969
3970 /* The documentation for this function is in its prototype declaration in
3971 target.h. */
3972
3973 int
3974 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3975 {
3976 struct target_ops *t;
3977
3978 for (t = current_target.beneath; t != NULL; t = t->beneath)
3979 if (t->to_remove_mask_watchpoint != NULL)
3980 {
3981 int ret;
3982
3983 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
3984
3985 if (targetdebug)
3986 fprintf_unfiltered (gdb_stdlog, "\
3987 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
3988 core_addr_to_string (addr),
3989 core_addr_to_string (mask), rw, ret);
3990
3991 return ret;
3992 }
3993
3994 return 1;
3995 }
3996
3997 /* The documentation for this function is in its prototype declaration
3998 in target.h. */
3999
4000 int
4001 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4002 {
4003 struct target_ops *t;
4004
4005 for (t = current_target.beneath; t != NULL; t = t->beneath)
4006 if (t->to_masked_watch_num_registers != NULL)
4007 return t->to_masked_watch_num_registers (t, addr, mask);
4008
4009 return -1;
4010 }
4011
4012 /* The documentation for this function is in its prototype declaration
4013 in target.h. */
4014
4015 int
4016 target_ranged_break_num_registers (void)
4017 {
4018 struct target_ops *t;
4019
4020 for (t = current_target.beneath; t != NULL; t = t->beneath)
4021 if (t->to_ranged_break_num_registers != NULL)
4022 return t->to_ranged_break_num_registers (t);
4023
4024 return -1;
4025 }
4026
4027 /* See target.h. */
4028
4029 struct btrace_target_info *
4030 target_enable_btrace (ptid_t ptid)
4031 {
4032 struct target_ops *t;
4033
4034 for (t = current_target.beneath; t != NULL; t = t->beneath)
4035 if (t->to_enable_btrace != NULL)
4036 return t->to_enable_btrace (t, ptid);
4037
4038 tcomplain ();
4039 return NULL;
4040 }
4041
4042 /* See target.h. */
4043
4044 void
4045 target_disable_btrace (struct btrace_target_info *btinfo)
4046 {
4047 struct target_ops *t;
4048
4049 for (t = current_target.beneath; t != NULL; t = t->beneath)
4050 if (t->to_disable_btrace != NULL)
4051 {
4052 t->to_disable_btrace (t, btinfo);
4053 return;
4054 }
4055
4056 tcomplain ();
4057 }
4058
4059 /* See target.h. */
4060
4061 void
4062 target_teardown_btrace (struct btrace_target_info *btinfo)
4063 {
4064 struct target_ops *t;
4065
4066 for (t = current_target.beneath; t != NULL; t = t->beneath)
4067 if (t->to_teardown_btrace != NULL)
4068 {
4069 t->to_teardown_btrace (t, btinfo);
4070 return;
4071 }
4072
4073 tcomplain ();
4074 }
4075
4076 /* See target.h. */
4077
4078 enum btrace_error
4079 target_read_btrace (VEC (btrace_block_s) **btrace,
4080 struct btrace_target_info *btinfo,
4081 enum btrace_read_type type)
4082 {
4083 struct target_ops *t;
4084
4085 for (t = current_target.beneath; t != NULL; t = t->beneath)
4086 if (t->to_read_btrace != NULL)
4087 return t->to_read_btrace (t, btrace, btinfo, type);
4088
4089 tcomplain ();
4090 return BTRACE_ERR_NOT_SUPPORTED;
4091 }
4092
4093 /* See target.h. */
4094
4095 void
4096 target_stop_recording (void)
4097 {
4098 struct target_ops *t;
4099
4100 for (t = current_target.beneath; t != NULL; t = t->beneath)
4101 if (t->to_stop_recording != NULL)
4102 {
4103 t->to_stop_recording (t);
4104 return;
4105 }
4106
4107 /* This is optional. */
4108 }
4109
4110 /* See target.h. */
4111
4112 void
4113 target_info_record (void)
4114 {
4115 struct target_ops *t;
4116
4117 for (t = current_target.beneath; t != NULL; t = t->beneath)
4118 if (t->to_info_record != NULL)
4119 {
4120 t->to_info_record (t);
4121 return;
4122 }
4123
4124 tcomplain ();
4125 }
4126
4127 /* See target.h. */
4128
4129 void
4130 target_save_record (const char *filename)
4131 {
4132 struct target_ops *t;
4133
4134 for (t = current_target.beneath; t != NULL; t = t->beneath)
4135 if (t->to_save_record != NULL)
4136 {
4137 t->to_save_record (t, filename);
4138 return;
4139 }
4140
4141 tcomplain ();
4142 }
4143
4144 /* See target.h. */
4145
4146 int
4147 target_supports_delete_record (void)
4148 {
4149 struct target_ops *t;
4150
4151 for (t = current_target.beneath; t != NULL; t = t->beneath)
4152 if (t->to_delete_record != NULL)
4153 return 1;
4154
4155 return 0;
4156 }
4157
4158 /* See target.h. */
4159
4160 void
4161 target_delete_record (void)
4162 {
4163 struct target_ops *t;
4164
4165 for (t = current_target.beneath; t != NULL; t = t->beneath)
4166 if (t->to_delete_record != NULL)
4167 {
4168 t->to_delete_record (t);
4169 return;
4170 }
4171
4172 tcomplain ();
4173 }
4174
4175 /* See target.h. */
4176
4177 int
4178 target_record_is_replaying (void)
4179 {
4180 struct target_ops *t;
4181
4182 for (t = current_target.beneath; t != NULL; t = t->beneath)
4183 if (t->to_record_is_replaying != NULL)
4184 return t->to_record_is_replaying (t);
4185
4186 return 0;
4187 }
4188
4189 /* See target.h. */
4190
4191 void
4192 target_goto_record_begin (void)
4193 {
4194 struct target_ops *t;
4195
4196 for (t = current_target.beneath; t != NULL; t = t->beneath)
4197 if (t->to_goto_record_begin != NULL)
4198 {
4199 t->to_goto_record_begin (t);
4200 return;
4201 }
4202
4203 tcomplain ();
4204 }
4205
4206 /* See target.h. */
4207
4208 void
4209 target_goto_record_end (void)
4210 {
4211 struct target_ops *t;
4212
4213 for (t = current_target.beneath; t != NULL; t = t->beneath)
4214 if (t->to_goto_record_end != NULL)
4215 {
4216 t->to_goto_record_end (t);
4217 return;
4218 }
4219
4220 tcomplain ();
4221 }
4222
4223 /* See target.h. */
4224
4225 void
4226 target_goto_record (ULONGEST insn)
4227 {
4228 struct target_ops *t;
4229
4230 for (t = current_target.beneath; t != NULL; t = t->beneath)
4231 if (t->to_goto_record != NULL)
4232 {
4233 t->to_goto_record (t, insn);
4234 return;
4235 }
4236
4237 tcomplain ();
4238 }
4239
4240 /* See target.h. */
4241
4242 void
4243 target_insn_history (int size, int flags)
4244 {
4245 struct target_ops *t;
4246
4247 for (t = current_target.beneath; t != NULL; t = t->beneath)
4248 if (t->to_insn_history != NULL)
4249 {
4250 t->to_insn_history (t, size, flags);
4251 return;
4252 }
4253
4254 tcomplain ();
4255 }
4256
4257 /* See target.h. */
4258
4259 void
4260 target_insn_history_from (ULONGEST from, int size, int flags)
4261 {
4262 struct target_ops *t;
4263
4264 for (t = current_target.beneath; t != NULL; t = t->beneath)
4265 if (t->to_insn_history_from != NULL)
4266 {
4267 t->to_insn_history_from (t, from, size, flags);
4268 return;
4269 }
4270
4271 tcomplain ();
4272 }
4273
4274 /* See target.h. */
4275
4276 void
4277 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4278 {
4279 struct target_ops *t;
4280
4281 for (t = current_target.beneath; t != NULL; t = t->beneath)
4282 if (t->to_insn_history_range != NULL)
4283 {
4284 t->to_insn_history_range (t, begin, end, flags);
4285 return;
4286 }
4287
4288 tcomplain ();
4289 }
4290
4291 /* See target.h. */
4292
4293 void
4294 target_call_history (int size, int flags)
4295 {
4296 struct target_ops *t;
4297
4298 for (t = current_target.beneath; t != NULL; t = t->beneath)
4299 if (t->to_call_history != NULL)
4300 {
4301 t->to_call_history (t, size, flags);
4302 return;
4303 }
4304
4305 tcomplain ();
4306 }
4307
4308 /* See target.h. */
4309
4310 void
4311 target_call_history_from (ULONGEST begin, int size, int flags)
4312 {
4313 struct target_ops *t;
4314
4315 for (t = current_target.beneath; t != NULL; t = t->beneath)
4316 if (t->to_call_history_from != NULL)
4317 {
4318 t->to_call_history_from (t, begin, size, flags);
4319 return;
4320 }
4321
4322 tcomplain ();
4323 }
4324
4325 /* See target.h. */
4326
4327 void
4328 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4329 {
4330 struct target_ops *t;
4331
4332 for (t = current_target.beneath; t != NULL; t = t->beneath)
4333 if (t->to_call_history_range != NULL)
4334 {
4335 t->to_call_history_range (t, begin, end, flags);
4336 return;
4337 }
4338
4339 tcomplain ();
4340 }
4341
/* Debug wrapper for to_prepare_to_store: delegate to the real
   target, then log the call.  */
static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
4349
4350 /* See target.h. */
4351
4352 const struct frame_unwind *
4353 target_get_unwinder (void)
4354 {
4355 struct target_ops *t;
4356
4357 for (t = current_target.beneath; t != NULL; t = t->beneath)
4358 if (t->to_get_unwinder != NULL)
4359 return t->to_get_unwinder;
4360
4361 return NULL;
4362 }
4363
4364 /* See target.h. */
4365
4366 const struct frame_unwind *
4367 target_get_tailcall_unwinder (void)
4368 {
4369 struct target_ops *t;
4370
4371 for (t = current_target.beneath; t != NULL; t = t->beneath)
4372 if (t->to_get_tailcall_unwinder != NULL)
4373 return t->to_get_tailcall_unwinder;
4374
4375 return NULL;
4376 }
4377
4378 /* See target.h. */
4379
4380 CORE_ADDR
4381 forward_target_decr_pc_after_break (struct target_ops *ops,
4382 struct gdbarch *gdbarch)
4383 {
4384 for (; ops != NULL; ops = ops->beneath)
4385 if (ops->to_decr_pc_after_break != NULL)
4386 return ops->to_decr_pc_after_break (ops, gdbarch);
4387
4388 return gdbarch_decr_pc_after_break (gdbarch);
4389 }
4390
/* See target.h.  */

CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  /* Start the search just below the top of the target stack.  */
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
4398
/* Debug wrapper around the deprecated xfer-memory method: perform the
   transfer via the real target, then log address, length, direction,
   the result, and (on success) the transferred bytes.  */
static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the line whenever the host buffer address is
	     16-byte aligned.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* At debug level 1, show only the first line of
		 bytes and elide the rest.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4439
/* Debug wrapper for to_files_info: delegate to the real target, then
   log the call.  */
static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4447
/* Debug wrapper for to_insert_breakpoint: delegate to the real
   target, then log the placed address and result.  */
static int
debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4462
/* Debug wrapper for to_remove_breakpoint: delegate to the real
   target, then log the placed address and result.  */
static int
debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4477
/* Debug wrapper for to_can_use_hw_breakpoint: delegate to the real
   target, then log the arguments and result.  */
static int
debug_to_can_use_hw_breakpoint (struct target_ops *self,
				int type, int cnt, int from_tty)
{
  int retval;

  retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
						  type, cnt, from_tty);

  /* NOTE(review): the int arguments are cast to unsigned long but
     printed with %ld; the cast/specifier pairing is inconsistent,
     though harmless on common platforms.  */
  fprintf_unfiltered (gdb_stdlog,
		      "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
		      (unsigned long) type,
		      (unsigned long) cnt,
		      (unsigned long) from_tty,
		      (unsigned long) retval);
  return retval;
}
4495
4496 static int
4497 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4498 CORE_ADDR addr, int len)
4499 {
4500 CORE_ADDR retval;
4501
4502 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4503 addr, len);
4504
4505 fprintf_unfiltered (gdb_stdlog,
4506 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4507 core_addr_to_string (addr), (unsigned long) len,
4508 core_addr_to_string (retval));
4509 return retval;
4510 }
4511
4512 static int
4513 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4514 CORE_ADDR addr, int len, int rw,
4515 struct expression *cond)
4516 {
4517 int retval;
4518
4519 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4520 addr, len,
4521 rw, cond);
4522
4523 fprintf_unfiltered (gdb_stdlog,
4524 "target_can_accel_watchpoint_condition "
4525 "(%s, %d, %d, %s) = %ld\n",
4526 core_addr_to_string (addr), len, rw,
4527 host_address_to_string (cond), (unsigned long) retval);
4528 return retval;
4529 }
4530
4531 static int
4532 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4533 {
4534 int retval;
4535
4536 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4537
4538 fprintf_unfiltered (gdb_stdlog,
4539 "target_stopped_by_watchpoint () = %ld\n",
4540 (unsigned long) retval);
4541 return retval;
4542 }
4543
4544 static int
4545 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4546 {
4547 int retval;
4548
4549 retval = debug_target.to_stopped_data_address (target, addr);
4550
4551 fprintf_unfiltered (gdb_stdlog,
4552 "target_stopped_data_address ([%s]) = %ld\n",
4553 core_addr_to_string (*addr),
4554 (unsigned long)retval);
4555 return retval;
4556 }
4557
4558 static int
4559 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4560 CORE_ADDR addr,
4561 CORE_ADDR start, int length)
4562 {
4563 int retval;
4564
4565 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4566 start, length);
4567
4568 fprintf_filtered (gdb_stdlog,
4569 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4570 core_addr_to_string (addr), core_addr_to_string (start),
4571 length, retval);
4572 return retval;
4573 }
4574
4575 static int
4576 debug_to_insert_hw_breakpoint (struct target_ops *self,
4577 struct gdbarch *gdbarch,
4578 struct bp_target_info *bp_tgt)
4579 {
4580 int retval;
4581
4582 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4583 gdbarch, bp_tgt);
4584
4585 fprintf_unfiltered (gdb_stdlog,
4586 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4587 core_addr_to_string (bp_tgt->placed_address),
4588 (unsigned long) retval);
4589 return retval;
4590 }
4591
4592 static int
4593 debug_to_remove_hw_breakpoint (struct target_ops *self,
4594 struct gdbarch *gdbarch,
4595 struct bp_target_info *bp_tgt)
4596 {
4597 int retval;
4598
4599 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4600 gdbarch, bp_tgt);
4601
4602 fprintf_unfiltered (gdb_stdlog,
4603 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4604 core_addr_to_string (bp_tgt->placed_address),
4605 (unsigned long) retval);
4606 return retval;
4607 }
4608
4609 static int
4610 debug_to_insert_watchpoint (struct target_ops *self,
4611 CORE_ADDR addr, int len, int type,
4612 struct expression *cond)
4613 {
4614 int retval;
4615
4616 retval = debug_target.to_insert_watchpoint (&debug_target,
4617 addr, len, type, cond);
4618
4619 fprintf_unfiltered (gdb_stdlog,
4620 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4621 core_addr_to_string (addr), len, type,
4622 host_address_to_string (cond), (unsigned long) retval);
4623 return retval;
4624 }
4625
4626 static int
4627 debug_to_remove_watchpoint (struct target_ops *self,
4628 CORE_ADDR addr, int len, int type,
4629 struct expression *cond)
4630 {
4631 int retval;
4632
4633 retval = debug_target.to_remove_watchpoint (&debug_target,
4634 addr, len, type, cond);
4635
4636 fprintf_unfiltered (gdb_stdlog,
4637 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4638 core_addr_to_string (addr), len, type,
4639 host_address_to_string (cond), (unsigned long) retval);
4640 return retval;
4641 }
4642
4643 static void
4644 debug_to_terminal_init (struct target_ops *self)
4645 {
4646 debug_target.to_terminal_init (&debug_target);
4647
4648 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4649 }
4650
4651 static void
4652 debug_to_terminal_inferior (struct target_ops *self)
4653 {
4654 debug_target.to_terminal_inferior (&debug_target);
4655
4656 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4657 }
4658
4659 static void
4660 debug_to_terminal_ours_for_output (struct target_ops *self)
4661 {
4662 debug_target.to_terminal_ours_for_output (&debug_target);
4663
4664 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4665 }
4666
4667 static void
4668 debug_to_terminal_ours (struct target_ops *self)
4669 {
4670 debug_target.to_terminal_ours (&debug_target);
4671
4672 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4673 }
4674
4675 static void
4676 debug_to_terminal_save_ours (struct target_ops *self)
4677 {
4678 debug_target.to_terminal_save_ours (&debug_target);
4679
4680 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4681 }
4682
/* Debug-trace wrapper for to_terminal_info: delegate to the saved
   target vector, then log the arguments.  NOTE(review): ARG is passed
   to "%s"; presumably callers never pass a NULL ARG — confirm.  */

static void
debug_to_terminal_info (struct target_ops *self,
			const char *arg, int from_tty)
{
  debug_target.to_terminal_info (&debug_target, arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
4692
4693 static void
4694 debug_to_load (struct target_ops *self, char *args, int from_tty)
4695 {
4696 debug_target.to_load (&debug_target, args, from_tty);
4697
4698 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4699 }
4700
4701 static void
4702 debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4703 {
4704 debug_target.to_post_startup_inferior (&debug_target, ptid);
4705
4706 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4707 ptid_get_pid (ptid));
4708 }
4709
4710 static int
4711 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4712 {
4713 int retval;
4714
4715 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4716
4717 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4718 pid, retval);
4719
4720 return retval;
4721 }
4722
4723 static int
4724 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4725 {
4726 int retval;
4727
4728 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4729
4730 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4731 pid, retval);
4732
4733 return retval;
4734 }
4735
4736 static int
4737 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4738 {
4739 int retval;
4740
4741 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4742
4743 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4744 pid, retval);
4745
4746 return retval;
4747 }
4748
4749 static int
4750 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4751 {
4752 int retval;
4753
4754 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4755
4756 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4757 pid, retval);
4758
4759 return retval;
4760 }
4761
4762 static int
4763 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4764 {
4765 int retval;
4766
4767 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4768
4769 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4770 pid, retval);
4771
4772 return retval;
4773 }
4774
4775 static int
4776 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4777 {
4778 int retval;
4779
4780 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4781
4782 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4783 pid, retval);
4784
4785 return retval;
4786 }
4787
/* Debug-trace wrapper for to_has_exited: delegate to the saved target
   vector and log arguments and result.  NOTE(review): *EXIT_STATUS is
   printed even when HAS_EXITED is 0, in which case the method may not
   have written it — presumably callers pass an initialized slot;
   confirm.  */

static int
debug_to_has_exited (struct target_ops *self,
		     int pid, int wait_status, int *exit_status)
{
  int has_exited;

  has_exited = debug_target.to_has_exited (&debug_target,
					   pid, wait_status, exit_status);

  fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
		      pid, wait_status, *exit_status, has_exited);

  return has_exited;
}
4802
4803 static int
4804 debug_to_can_run (struct target_ops *self)
4805 {
4806 int retval;
4807
4808 retval = debug_target.to_can_run (&debug_target);
4809
4810 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4811
4812 return retval;
4813 }
4814
/* Debug-trace wrapper for to_thread_architecture: delegate, then log
   the ptid and the resulting gdbarch.  NOTE(review): unlike most
   wrappers in this file, this delegates with OPS rather than
   &debug_target — confirm that is intentional.  Also assumes the
   method never returns NULL (the result is dereferenced for the
   arch's printable name).  */

static struct gdbarch *
debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  struct gdbarch *retval;

  retval = debug_target.to_thread_architecture (ops, ptid);

  fprintf_unfiltered (gdb_stdlog,
		      "target_thread_architecture (%s) = %s [%s]\n",
		      target_pid_to_str (ptid),
		      host_address_to_string (retval),
		      gdbarch_bfd_arch_info (retval)->printable_name);
  return retval;
}
4829
4830 static void
4831 debug_to_stop (struct target_ops *self, ptid_t ptid)
4832 {
4833 debug_target.to_stop (&debug_target, ptid);
4834
4835 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4836 target_pid_to_str (ptid));
4837 }
4838
4839 static void
4840 debug_to_rcmd (struct target_ops *self, char *command,
4841 struct ui_file *outbuf)
4842 {
4843 debug_target.to_rcmd (&debug_target, command, outbuf);
4844 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4845 }
4846
4847 static char *
4848 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4849 {
4850 char *exec_file;
4851
4852 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4853
4854 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4855 pid, exec_file);
4856
4857 return exec_file;
4858 }
4859
/* Turn on the "target debug" wrappers: snapshot the current target
   vector into DEBUG_TARGET, then replace selected methods of
   CURRENT_TARGET with debug_to_* tracing versions.  Each wrapper
   delegates to the saved copy and logs the call to gdb_stdlog.  */

static void
setup_target_debug (void)
{
  /* Keep the real methods so the wrappers can forward to them.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4907 \f
4908
/* Help text shared by the "info target" and "info files" commands
   (registered in initialize_targets).  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4913
/* Default implementation of to_rcmd: targets without a remote monitor
   simply report that "monitor" is unsupported.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4919
4920 static void
4921 do_monitor_command (char *cmd,
4922 int from_tty)
4923 {
4924 target_rcmd (cmd, gdb_stdtarg);
4925 }
4926
4927 /* Print the name of each layers of our target stack. */
4928
4929 static void
4930 maintenance_print_target_stack (char *cmd, int from_tty)
4931 {
4932 struct target_ops *t;
4933
4934 printf_filtered (_("The current target stack is:\n"));
4935
4936 for (t = target_stack; t != NULL; t = t->beneath)
4937 {
4938 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4939 }
4940 }
4941
/* Controls if async mode is permitted.  This is the value actually
   consulted by the rest of GDB.  */
int target_async_permitted = 0;

/* The "set target-async" command writes to this variable.  If the
   inferior is executing, target_async_permitted is *not* updated
   (see set_target_async_command, which copies it over only when no
   live inferior exists).  */
static int target_async_permitted_1 = 0;
4948
4949 static void
4950 set_target_async_command (char *args, int from_tty,
4951 struct cmd_list_element *c)
4952 {
4953 if (have_live_inferiors ())
4954 {
4955 target_async_permitted_1 = target_async_permitted;
4956 error (_("Cannot change this setting while the inferior is running."));
4957 }
4958
4959 target_async_permitted = target_async_permitted_1;
4960 }
4961
/* "show target-async" callback: report the current setting.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in asynchronous mode is %s.\n"),
		    value);
}
4971
/* Temporary copies of permission settings.  The "set may-*" commands
   write here first; the real may_* flags are only updated when it is
   safe to do so (see set_target_permissions and
   set_write_memory_permission below).  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4980
/* Make the user-set values match the real values again.  Used to roll
   back the user-visible copies when a "set may-*" command is rejected
   (e.g. because the inferior is running), so "show" stays accurate.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4993
/* The one function handles (most of) the permission flags in the same
   way.  Note that may_write_memory is deliberately not handled here:
   it has its own setter (set_write_memory_permission below) so it can
   be changed independently of observer mode.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Reject the change and resync the user-visible copies.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
5015
/* Set memory write permission independently of observer mode.  Unlike
   set_target_permissions, this does not refuse while the inferior is
   running.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
5026
5027
/* Module initialization: push the initial dummy target and register
   target-related commands and "set/show" knobs (debugging verbosity,
   readonly-section trust, async mode, and the may-* permission
   flags).  */

void
initialize_targets (void)
{
  /* The target stack always has at least the dummy target on it.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are aliases of each other.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  /* "set target-async" goes through target_async_permitted_1 so the
     change can be refused while an inferior is live.  */
  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The may-* permission commands likewise write to _1 copies first;
     set_target_permissions / set_write_memory_permission commit them.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}