convert to_trace_stop
[binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
/* Forward declarations of file-local helpers and default target-method
   implementations defined later in this file.  */

static void target_info (char *, int);

static void default_terminal_info (struct target_ops *, const char *, int);

static int default_watchpoint_addr_within_range (struct target_ops *,
						 CORE_ADDR, CORE_ADDR, int);

static int default_region_ok_for_hw_watchpoint (struct target_ops *,
						CORE_ADDR, int);

static void default_rcmd (struct target_ops *, char *, struct ui_file *);

static ptid_t default_get_ada_task_ptid (struct target_ops *self,
					 long lwp, long tid);

/* Error/stub helpers used as default method implementations.  */

static void tcomplain (void) ATTRIBUTE_NORETURN;

static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);

static int return_zero (void);

static int return_minus_one (void);

static void *return_null (void);

void target_ignore (void);

static void target_command (char *, int);

static struct target_ops *find_default_run_target (char *);

static target_xfer_partial_ftype default_xfer_partial;

static struct gdbarch *default_thread_architecture (struct target_ops *ops,
						   ptid_t ptid);

static int dummy_find_memory_regions (struct target_ops *self,
				      find_memory_region_ftype ignore1,
				      void *ignore2);

static char *dummy_make_corefile_notes (struct target_ops *self,
					bfd *ignore1, int *ignore2);

static int find_default_can_async_p (struct target_ops *ignore);

static int find_default_is_async_p (struct target_ops *ignore);

static enum exec_direction_kind default_execution_direction
    (struct target_ops *self);

/* Machine-generated delegation functions for the target vector.  */
#include "target-delegates.c"

static void init_dummy_target (void);

/* Target vector used when target debugging is enabled; it wraps the
   real methods with logging (see setup_target_debug).  */
static struct target_ops debug_target;
103
/* Forward declarations of the debug-logging wrappers installed over
   the current target's methods by setup_target_debug.  */

static void debug_to_open (char *, int);

static void debug_to_prepare_to_store (struct target_ops *self,
				       struct regcache *);

static void debug_to_files_info (struct target_ops *);

static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
				       struct bp_target_info *);

static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
				       struct bp_target_info *);

static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
					   int, int, int);

static int debug_to_insert_hw_breakpoint (struct target_ops *self,
					  struct gdbarch *,
					  struct bp_target_info *);

static int debug_to_remove_hw_breakpoint (struct target_ops *self,
					  struct gdbarch *,
					  struct bp_target_info *);

static int debug_to_insert_watchpoint (struct target_ops *self,
				       CORE_ADDR, int, int,
				       struct expression *);

static int debug_to_remove_watchpoint (struct target_ops *self,
				       CORE_ADDR, int, int,
				       struct expression *);

static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);

static int debug_to_watchpoint_addr_within_range (struct target_ops *,
						  CORE_ADDR, CORE_ADDR, int);

static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
						 CORE_ADDR, int);

static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
						    CORE_ADDR, int, int,
						    struct expression *);

static void debug_to_terminal_init (struct target_ops *self);

static void debug_to_terminal_inferior (struct target_ops *self);

static void debug_to_terminal_ours_for_output (struct target_ops *self);

static void debug_to_terminal_save_ours (struct target_ops *self);

static void debug_to_terminal_ours (struct target_ops *self);

static void debug_to_load (struct target_ops *self, char *, int);

static int debug_to_can_run (struct target_ops *self);

static void debug_to_stop (struct target_ops *self, ptid_t);
/* Pointer to array of target architecture structures; the size of the
   array; the current index into the array; the allocated size of the
   array.  */
struct target_ops **target_structs;
unsigned target_struct_size;
unsigned target_struct_allocsize;
#define DEFAULT_ALLOCSIZE 10

/* The initial current target, so that there is always a semi-valid
   current target.  */

static struct target_ops dummy_target;

/* Top of target stack.  */

static struct target_ops *target_stack;

/* The target structure we are currently using to talk to a process
   or file or whatever "inferior" we have.  */

struct target_ops current_target;

/* Command list for target.  */

static struct cmd_list_element *targetlist = NULL;

/* Nonzero if we should trust readonly sections from the
   executable when reading memory.  */

static int trust_readonly = 0;

/* Nonzero if we should show true memory content including
   memory breakpoint inserted by gdb.  */

static int show_memory_breakpoints = 0;

/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertent disruption, such as in non-stop mode.  */

int may_write_registers = 1;

int may_write_memory = 1;

int may_insert_breakpoints = 1;

int may_insert_tracepoints = 1;

int may_insert_fast_tracepoints = 1;

int may_stop = 1;

/* Non-zero if we want to see trace of target level stuff.  */

static unsigned int targetdebug = 0;
/* Implementation of "show debug target": report the current setting.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
225
226 static void setup_target_debug (void);
227
228 /* The user just typed 'target' without the name of a target. */
229
230 static void
231 target_command (char *arg, int from_tty)
232 {
233 fputs_filtered ("Argument required (target name). Try `help target'\n",
234 gdb_stdout);
235 }
236
237 /* Default target_has_* methods for process_stratum targets. */
238
239 int
240 default_child_has_all_memory (struct target_ops *ops)
241 {
242 /* If no inferior selected, then we can't read memory here. */
243 if (ptid_equal (inferior_ptid, null_ptid))
244 return 0;
245
246 return 1;
247 }
248
249 int
250 default_child_has_memory (struct target_ops *ops)
251 {
252 /* If no inferior selected, then we can't read memory here. */
253 if (ptid_equal (inferior_ptid, null_ptid))
254 return 0;
255
256 return 1;
257 }
258
259 int
260 default_child_has_stack (struct target_ops *ops)
261 {
262 /* If no inferior selected, there's no stack. */
263 if (ptid_equal (inferior_ptid, null_ptid))
264 return 0;
265
266 return 1;
267 }
268
269 int
270 default_child_has_registers (struct target_ops *ops)
271 {
272 /* Can't read registers from no inferior. */
273 if (ptid_equal (inferior_ptid, null_ptid))
274 return 0;
275
276 return 1;
277 }
278
279 int
280 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
281 {
282 /* If there's no thread selected, then we can't make it run through
283 hoops. */
284 if (ptid_equal (the_ptid, null_ptid))
285 return 0;
286
287 return 1;
288 }
289
290
291 int
292 target_has_all_memory_1 (void)
293 {
294 struct target_ops *t;
295
296 for (t = current_target.beneath; t != NULL; t = t->beneath)
297 if (t->to_has_all_memory (t))
298 return 1;
299
300 return 0;
301 }
302
303 int
304 target_has_memory_1 (void)
305 {
306 struct target_ops *t;
307
308 for (t = current_target.beneath; t != NULL; t = t->beneath)
309 if (t->to_has_memory (t))
310 return 1;
311
312 return 0;
313 }
314
315 int
316 target_has_stack_1 (void)
317 {
318 struct target_ops *t;
319
320 for (t = current_target.beneath; t != NULL; t = t->beneath)
321 if (t->to_has_stack (t))
322 return 1;
323
324 return 0;
325 }
326
327 int
328 target_has_registers_1 (void)
329 {
330 struct target_ops *t;
331
332 for (t = current_target.beneath; t != NULL; t = t->beneath)
333 if (t->to_has_registers (t))
334 return 1;
335
336 return 0;
337 }
338
339 int
340 target_has_execution_1 (ptid_t the_ptid)
341 {
342 struct target_ops *t;
343
344 for (t = current_target.beneath; t != NULL; t = t->beneath)
345 if (t->to_has_execution (t, the_ptid))
346 return 1;
347
348 return 0;
349 }
350
351 int
352 target_has_execution_current (void)
353 {
354 return target_has_execution_1 (inferior_ptid);
355 }
356
357 /* Complete initialization of T. This ensures that various fields in
358 T are set, if needed by the target implementation. */
359
360 void
361 complete_target_initialization (struct target_ops *t)
362 {
363 /* Provide default values for all "must have" methods. */
364 if (t->to_xfer_partial == NULL)
365 t->to_xfer_partial = default_xfer_partial;
366
367 if (t->to_has_all_memory == NULL)
368 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
369
370 if (t->to_has_memory == NULL)
371 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
372
373 if (t->to_has_stack == NULL)
374 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
375
376 if (t->to_has_registers == NULL)
377 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
378
379 if (t->to_has_execution == NULL)
380 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
381
382 install_delegators (t);
383 }
384
385 /* Add possible target architecture T to the list and add a new
386 command 'target T->to_shortname'. Set COMPLETER as the command's
387 completer if not NULL. */
388
389 void
390 add_target_with_completer (struct target_ops *t,
391 completer_ftype *completer)
392 {
393 struct cmd_list_element *c;
394
395 complete_target_initialization (t);
396
397 if (!target_structs)
398 {
399 target_struct_allocsize = DEFAULT_ALLOCSIZE;
400 target_structs = (struct target_ops **) xmalloc
401 (target_struct_allocsize * sizeof (*target_structs));
402 }
403 if (target_struct_size >= target_struct_allocsize)
404 {
405 target_struct_allocsize *= 2;
406 target_structs = (struct target_ops **)
407 xrealloc ((char *) target_structs,
408 target_struct_allocsize * sizeof (*target_structs));
409 }
410 target_structs[target_struct_size++] = t;
411
412 if (targetlist == NULL)
413 add_prefix_cmd ("target", class_run, target_command, _("\
414 Connect to a target machine or process.\n\
415 The first argument is the type or protocol of the target machine.\n\
416 Remaining arguments are interpreted by the target protocol. For more\n\
417 information on the arguments for a particular protocol, type\n\
418 `help target ' followed by the protocol name."),
419 &targetlist, "target ", 0, &cmdlist);
420 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
421 &targetlist);
422 if (completer != NULL)
423 set_cmd_completer (c, completer);
424 }
425
426 /* Add a possible target architecture to the list. */
427
428 void
429 add_target (struct target_ops *t)
430 {
431 add_target_with_completer (t, NULL);
432 }
433
434 /* See target.h. */
435
436 void
437 add_deprecated_target_alias (struct target_ops *t, char *alias)
438 {
439 struct cmd_list_element *c;
440 char *alt;
441
442 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
443 see PR cli/15104. */
444 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
445 alt = xstrprintf ("target %s", t->to_shortname);
446 deprecate_cmd (c, alt);
447 }
448
/* Stub functions */

/* Do-nothing default for target methods with no required action.  */

void
target_ignore (void)
{
}
455
456 void
457 target_kill (void)
458 {
459 struct target_ops *t;
460
461 for (t = current_target.beneath; t != NULL; t = t->beneath)
462 if (t->to_kill != NULL)
463 {
464 if (targetdebug)
465 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
466
467 t->to_kill (t);
468 return;
469 }
470
471 noprocess ();
472 }
473
474 void
475 target_load (char *arg, int from_tty)
476 {
477 target_dcache_invalidate ();
478 (*current_target.to_load) (&current_target, arg, from_tty);
479 }
480
481 void
482 target_create_inferior (char *exec_file, char *args,
483 char **env, int from_tty)
484 {
485 struct target_ops *t;
486
487 for (t = current_target.beneath; t != NULL; t = t->beneath)
488 {
489 if (t->to_create_inferior != NULL)
490 {
491 t->to_create_inferior (t, exec_file, args, env, from_tty);
492 if (targetdebug)
493 fprintf_unfiltered (gdb_stdlog,
494 "target_create_inferior (%s, %s, xxx, %d)\n",
495 exec_file, args, from_tty);
496 return;
497 }
498 }
499
500 internal_error (__FILE__, __LINE__,
501 _("could not find a target to create inferior"));
502 }
503
504 void
505 target_terminal_inferior (void)
506 {
507 /* A background resume (``run&'') should leave GDB in control of the
508 terminal. Use target_can_async_p, not target_is_async_p, since at
509 this point the target is not async yet. However, if sync_execution
510 is not set, we know it will become async prior to resume. */
511 if (target_can_async_p () && !sync_execution)
512 return;
513
514 /* If GDB is resuming the inferior in the foreground, install
515 inferior's terminal modes. */
516 (*current_target.to_terminal_inferior) (&current_target);
517 }
518
519 static int
520 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
521 struct target_ops *t)
522 {
523 errno = EIO; /* Can't read/write this location. */
524 return 0; /* No bytes handled. */
525 }
526
527 static void
528 tcomplain (void)
529 {
530 error (_("You can't do that when your target is `%s'"),
531 current_target.to_shortname);
532 }
533
/* Throw an error saying the operation needs a live process.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
539
/* Default to_terminal_info method: nothing was saved, say so.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
545
546 /* A default implementation for the to_get_ada_task_ptid target method.
547
548 This function builds the PTID by using both LWP and TID as part of
549 the PTID lwp and tid elements. The pid used is the pid of the
550 inferior_ptid. */
551
552 static ptid_t
553 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
554 {
555 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
556 }
557
558 static enum exec_direction_kind
559 default_execution_direction (struct target_ops *self)
560 {
561 if (!target_can_execute_reverse)
562 return EXEC_FORWARD;
563 else if (!target_can_async_p ())
564 return EXEC_FORWARD;
565 else
566 gdb_assert_not_reached ("\
567 to_execution_direction must be implemented for reverse async");
568 }
569
570 /* Go through the target stack from top to bottom, copying over zero
571 entries in current_target, then filling in still empty entries. In
572 effect, we are doing class inheritance through the pushed target
573 vectors.
574
575 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
576 is currently implemented, is that it discards any knowledge of
577 which target an inherited method originally belonged to.
   Consequently, new target methods should instead explicitly and
579 locally search the target stack for the target that can handle the
580 request. */
581
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* INHERIT copies FIELD from TARGET into current_target, but only if
     no shallower target already supplied it.  Fields commented "Do not
     inherit" are either delegated (via install_delegators) or searched
     for explicitly on the target stack by their accessor.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      /* Do not inherit to_files_info.  */
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      /* Do not inherit to_can_use_hw_breakpoint.  */
      /* Do not inherit to_insert_hw_breakpoint.  */
      /* Do not inherit to_remove_hw_breakpoint.  */
      /* Do not inherit to_ranged_break_num_registers.  */
      /* Do not inherit to_insert_watchpoint.  */
      /* Do not inherit to_remove_watchpoint.  */
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      /* Do not inherit to_watchpoint_addr_within_range.  */
      /* Do not inherit to_region_ok_for_hw_watchpoint.  */
      /* Do not inherit to_can_accel_watchpoint_condition.  */
      /* Do not inherit to_masked_watch_num_registers.  */
      /* Do not inherit to_terminal_init.  */
      /* Do not inherit to_terminal_inferior.  */
      /* Do not inherit to_terminal_ours_for_output.  */
      /* Do not inherit to_terminal_ours.  */
      /* Do not inherit to_terminal_save_ours.  */
      /* Do not inherit to_terminal_info.  */
      /* Do not inherit to_kill.  */
      /* Do not inherit to_load.  */
      /* Do not inherit to_create_inferior.  */
      /* Do not inherit to_post_startup_inferior.  */
      /* Do not inherit to_insert_fork_catchpoint.  */
      /* Do not inherit to_remove_fork_catchpoint.  */
      /* Do not inherit to_insert_vfork_catchpoint.  */
      /* Do not inherit to_remove_vfork_catchpoint.  */
      /* Do not inherit to_follow_fork.  */
      /* Do not inherit to_insert_exec_catchpoint.  */
      /* Do not inherit to_remove_exec_catchpoint.  */
      /* Do not inherit to_set_syscall_catchpoint.  */
      /* Do not inherit to_has_exited.  */
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      /* Do not inherit to_extra_thread_info.  */
      /* Do not inherit to_thread_name.  */
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      /* Do not inherit to_pid_to_exec_file.  */
      /* Do not inherit to_log_command.  */
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      /* Do not inherit to_find_memory_regions.  */
      /* Do not inherit to_make_corefile_notes.  */
      /* Do not inherit to_get_bookmark.  */
      /* Do not inherit to_goto_bookmark.  */
      /* Do not inherit to_get_thread_local_address.  */
      /* Do not inherit to_can_execute_reverse.  */
      /* Do not inherit to_execution_direction.  */
      /* Do not inherit to_thread_architecture.  */
      /* Do not inherit to_read_description.  */
      /* Do not inherit to_get_ada_task_ptid.  */
      /* Do not inherit to_search_memory.  */
      /* Do not inherit to_supports_multi_process.  */
      /* Do not inherit to_supports_enable_disable_tracepoint.  */
      /* Do not inherit to_supports_string_tracing.  */
      /* Do not inherit to_trace_init.  */
      /* Do not inherit to_download_tracepoint.  */
      /* Do not inherit to_can_download_tracepoint.  */
      /* Do not inherit to_download_trace_state_variable.  */
      /* Do not inherit to_enable_tracepoint.  */
      /* Do not inherit to_disable_tracepoint.  */
      /* Do not inherit to_trace_set_readonly_regions.  */
      /* Do not inherit to_trace_start.  */
      /* Do not inherit to_get_trace_status.  */
      /* Do not inherit to_get_tracepoint_status.  */
      /* Do not inherit to_trace_stop.  */
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_buffer_size, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
  if (!current_target.field)               \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_stop,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  /* to_read_description is left NULL on purpose: callers test it.  */
  current_target.to_read_description = NULL;
  de_fault (to_trace_find,
	    (int (*) (struct target_ops *,
		      enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (struct target_ops *, int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (struct target_ops *, const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct target_ops *, struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct target_ops *, struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (struct target_ops *, gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (struct target_ops *))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_trace_buffer_size,
	    (void (*) (struct target_ops *, LONGEST))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (struct target_ops *,
		      const char *, const char *, const char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (struct target_ops *, ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (struct target_ops *,
		      CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (struct target_ops *,
						    const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (struct target_ops *))
	    return_null);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (struct target_ops *, int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (struct target_ops *))
	    return_zero);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
833
834 /* Push a new target type into the stack of the existing target accessors,
835 possibly superseding some of the existing accessors.
836
837 Rather than allow an empty stack, we always have the dummy target at
838 the bottom stratum, so we can call the function vectors without
839 checking them. */
840
841 void
842 push_target (struct target_ops *t)
843 {
844 struct target_ops **cur;
845
846 /* Check magic number. If wrong, it probably means someone changed
847 the struct definition, but not all the places that initialize one. */
848 if (t->to_magic != OPS_MAGIC)
849 {
850 fprintf_unfiltered (gdb_stderr,
851 "Magic number of %s target struct wrong\n",
852 t->to_shortname);
853 internal_error (__FILE__, __LINE__,
854 _("failed internal consistency check"));
855 }
856
857 /* Find the proper stratum to install this target in. */
858 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
859 {
860 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
861 break;
862 }
863
864 /* If there's already targets at this stratum, remove them. */
865 /* FIXME: cagney/2003-10-15: I think this should be popping all
866 targets to CUR, and not just those at this stratum level. */
867 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
868 {
869 /* There's already something at this stratum level. Close it,
870 and un-hook it from the stack. */
871 struct target_ops *tmp = (*cur);
872
873 (*cur) = (*cur)->beneath;
874 tmp->beneath = NULL;
875 target_close (tmp);
876 }
877
878 /* We have removed all targets in our stratum, now add the new one. */
879 t->beneath = (*cur);
880 (*cur) = t;
881
882 update_current_target ();
883 }
884
885 /* Remove a target_ops vector from the stack, wherever it may be.
886 Return how many times it was removed (0 or 1). */
887
888 int
889 unpush_target (struct target_ops *t)
890 {
891 struct target_ops **cur;
892 struct target_ops *tmp;
893
894 if (t->to_stratum == dummy_stratum)
895 internal_error (__FILE__, __LINE__,
896 _("Attempt to unpush the dummy target"));
897
898 /* Look for the specified target. Note that we assume that a target
899 can only occur once in the target stack. */
900
901 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
902 {
903 if ((*cur) == t)
904 break;
905 }
906
907 /* If we don't find target_ops, quit. Only open targets should be
908 closed. */
909 if ((*cur) == NULL)
910 return 0;
911
912 /* Unchain the target. */
913 tmp = (*cur);
914 (*cur) = (*cur)->beneath;
915 tmp->beneath = NULL;
916
917 update_current_target ();
918
919 /* Finally close the target. Note we do this after unchaining, so
920 any target method calls from within the target_close
921 implementation don't end up in T anymore. */
922 target_close (t);
923
924 return 1;
925 }
926
927 void
928 pop_all_targets_above (enum strata above_stratum)
929 {
930 while ((int) (current_target.to_stratum) > (int) above_stratum)
931 {
932 if (!unpush_target (target_stack))
933 {
934 fprintf_unfiltered (gdb_stderr,
935 "pop_all_targets couldn't find target %s\n",
936 target_stack->to_shortname);
937 internal_error (__FILE__, __LINE__,
938 _("failed internal consistency check"));
939 break;
940 }
941 }
942 }
943
944 void
945 pop_all_targets (void)
946 {
947 pop_all_targets_above (dummy_stratum);
948 }
949
950 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
951
952 int
953 target_is_pushed (struct target_ops *t)
954 {
955 struct target_ops **cur;
956
957 /* Check magic number. If wrong, it probably means someone changed
958 the struct definition, but not all the places that initialize one. */
959 if (t->to_magic != OPS_MAGIC)
960 {
961 fprintf_unfiltered (gdb_stderr,
962 "Magic number of %s target struct wrong\n",
963 t->to_shortname);
964 internal_error (__FILE__, __LINE__,
965 _("failed internal consistency check"));
966 }
967
968 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
969 if (*cur == t)
970 return 1;
971
972 return 0;
973 }
974
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.

   Walks the target stack for a target providing
   to_get_thread_local_address, asks the gdbarch for the objfile's load
   module address, and translates exceptions raised along the way into
   user-facing error messages.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile because it is assigned inside TRY_CATCH and read after.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target on the stack that can translate TLS
     addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Not a TLS-specific failure; re-throw to an outer
		 handler.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1071
1072 const char *
1073 target_xfer_status_to_string (enum target_xfer_status err)
1074 {
1075 #define CASE(X) case X: return #X
1076 switch (err)
1077 {
1078 CASE(TARGET_XFER_E_IO);
1079 CASE(TARGET_XFER_E_UNAVAILABLE);
1080 default:
1081 return "<unknown>";
1082 }
1083 #undef CASE
1084 };
1085
1086
1087 #undef MIN
1088 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1089
/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  *STRING is set (and owned by the caller) even when an error is
   reported; it then holds whatever prefix was read before the failure.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];		/* One aligned 4-byte chunk of target memory.  */
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;			/* Next free slot in BUFFER.  */
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read an aligned 4-byte chunk; TLEN is how many of its bytes
	 belong to the requested range on this iteration, OFFSET where
	 they start within BUF.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the output buffer (doubling) if this chunk won't fit.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy the chunk into BUFFER, stopping at (and counting) a NUL
	 terminator.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* Hand the (possibly partial, possibly unterminated) buffer to the
     caller along with the error code, if requested.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1162
1163 struct target_section_table *
1164 target_get_section_table (struct target_ops *target)
1165 {
1166 struct target_ops *t;
1167
1168 if (targetdebug)
1169 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1170
1171 for (t = target; t != NULL; t = t->beneath)
1172 if (t->to_get_section_table != NULL)
1173 return (*t->to_get_section_table) (t);
1174
1175 return NULL;
1176 }
1177
1178 /* Find a section containing ADDR. */
1179
1180 struct target_section *
1181 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1182 {
1183 struct target_section_table *table = target_get_section_table (target);
1184 struct target_section *secp;
1185
1186 if (table == NULL)
1187 return NULL;
1188
1189 for (secp = table->sections; secp < table->sections_end; secp++)
1190 {
1191 if (addr >= secp->addr && addr < secp->endaddr)
1192 return secp;
1193 }
1194 return NULL;
1195 }
1196
1197 /* Read memory from the live target, even if currently inspecting a
1198 traceframe. The return is the same as that of target_read. */
1199
1200 static enum target_xfer_status
1201 target_read_live_memory (enum target_object object,
1202 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1203 ULONGEST *xfered_len)
1204 {
1205 enum target_xfer_status ret;
1206 struct cleanup *cleanup;
1207
1208 /* Switch momentarily out of tfind mode so to access live memory.
1209 Note that this must not clear global state, such as the frame
1210 cache, which must still remain valid for the previous traceframe.
1211 We may be _building_ the frame cache at this point. */
1212 cleanup = make_cleanup_restore_traceframe_number ();
1213 set_traceframe_number (-1);
1214
1215 ret = target_xfer_partial (current_target.beneath, object, NULL,
1216 myaddr, NULL, memaddr, len, xfered_len);
1217
1218 do_cleanups (cleanup);
1219 return ret;
1220 }
1221
1222 /* Using the set of read-only target sections of OPS, read live
1223 read-only memory. Note that the actual reads start from the
1224 top-most target again.
1225
1226 For interface/parameters/return description see target.h,
1227 to_xfer_partial. */
1228
1229 static enum target_xfer_status
1230 memory_xfer_live_readonly_partial (struct target_ops *ops,
1231 enum target_object object,
1232 gdb_byte *readbuf, ULONGEST memaddr,
1233 ULONGEST len, ULONGEST *xfered_len)
1234 {
1235 struct target_section *secp;
1236 struct target_section_table *table;
1237
1238 secp = target_section_by_addr (ops, memaddr);
1239 if (secp != NULL
1240 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1241 secp->the_bfd_section)
1242 & SEC_READONLY))
1243 {
1244 struct target_section *p;
1245 ULONGEST memend = memaddr + len;
1246
1247 table = target_get_section_table (ops);
1248
1249 for (p = table->sections; p < table->sections_end; p++)
1250 {
1251 if (memaddr >= p->addr)
1252 {
1253 if (memend <= p->endaddr)
1254 {
1255 /* Entire transfer is within this section. */
1256 return target_read_live_memory (object, memaddr,
1257 readbuf, len, xfered_len);
1258 }
1259 else if (memaddr >= p->endaddr)
1260 {
1261 /* This section ends before the transfer starts. */
1262 continue;
1263 }
1264 else
1265 {
1266 /* This section overlaps the transfer. Just do half. */
1267 len = p->endaddr - memaddr;
1268 return target_read_live_memory (object, memaddr,
1269 readbuf, len, xfered_len);
1270 }
1271 }
1272 }
1273 }
1274
1275 return TARGET_XFER_EOF;
1276 }
1277
1278 /* Read memory from more than one valid target. A core file, for
1279 instance, could have some of memory but delegate other bits to
1280 the target below it. So, we must manually try all targets. */
1281
1282 static enum target_xfer_status
1283 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1284 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1285 ULONGEST *xfered_len)
1286 {
1287 enum target_xfer_status res;
1288
1289 do
1290 {
1291 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1292 readbuf, writebuf, memaddr, len,
1293 xfered_len);
1294 if (res == TARGET_XFER_OK)
1295 break;
1296
1297 /* Stop if the target reports that the memory is not available. */
1298 if (res == TARGET_XFER_E_UNAVAILABLE)
1299 break;
1300
1301 /* We want to continue past core files to executables, but not
1302 past a running target's memory. */
1303 if (ops->to_has_all_memory (ops))
1304 break;
1305
1306 ops = ops->beneath;
1307 }
1308 while (ops != NULL);
1309
1310 return res;
1311 }
1312
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   Tries, in order: unmapped overlay sections, trusted read-only
   executable sections, traceframe-available memory (falling back to
   live read-only memory), the dcache, and finally the raw target
   stack.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  /* LEN clipped to the containing memory region's upper bound.
     NOTE(review): REG_LEN is int while LEN is ULONGEST, so a request
     larger than INT_MAX would truncate here -- confirm callers never
     pass such lengths.  */
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  /* The traceframe does not cover the start of the request:
	     either nothing is available, or the first available range
	     starts beyond MEMADDR.  */
	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1523
1524 /* Perform a partial memory transfer. For docs see target.h,
1525 to_xfer_partial. */
1526
1527 static enum target_xfer_status
1528 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1529 gdb_byte *readbuf, const gdb_byte *writebuf,
1530 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1531 {
1532 enum target_xfer_status res;
1533
1534 /* Zero length requests are ok and require no work. */
1535 if (len == 0)
1536 return TARGET_XFER_EOF;
1537
1538 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1539 breakpoint insns, thus hiding out from higher layers whether
1540 there are software breakpoints inserted in the code stream. */
1541 if (readbuf != NULL)
1542 {
1543 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1544 xfered_len);
1545
1546 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1547 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1548 }
1549 else
1550 {
1551 void *buf;
1552 struct cleanup *old_chain;
1553
1554 /* A large write request is likely to be partially satisfied
1555 by memory_xfer_partial_1. We will continually malloc
1556 and free a copy of the entire write request for breakpoint
1557 shadow handling even though we only end up writing a small
1558 subset of it. Cap writes to 4KB to mitigate this. */
1559 len = min (4096, len);
1560
1561 buf = xmalloc (len);
1562 old_chain = make_cleanup (xfree, buf);
1563 memcpy (buf, writebuf, len);
1564
1565 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1566 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1567 xfered_len);
1568
1569 do_cleanups (old_chain);
1570 }
1571
1572 return res;
1573 }
1574
1575 static void
1576 restore_show_memory_breakpoints (void *arg)
1577 {
1578 show_memory_breakpoints = (uintptr_t) arg;
1579 }
1580
1581 struct cleanup *
1582 make_show_memory_breakpoints_cleanup (int show)
1583 {
1584 int current = show_memory_breakpoints;
1585
1586 show_memory_breakpoints = show;
1587 return make_cleanup (restore_show_memory_breakpoints,
1588 (void *) (uintptr_t) current);
1589 }
1590
/* For docs see target.h, to_xfer_partial.

   Central dispatch for all partial transfers: routes memory-flavored
   objects through the memory-specific paths, everything else straight
   to OPS's to_xfer_partial, and optionally dumps the transfer when
   "set debug target" is on.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Refuse writes when the user has disabled them with
     "set may-write-memory off".  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  /* NOTE(review): I is int while *XFERED_LEN is ULONGEST, a
	     signed/unsigned comparison -- harmless for realistic
	     lengths but worth confirming.  */
	  for (i = 0; i < *xfered_len; i++)
	    {
	      /* Start a fresh line every 16 bytes; with low debug
		 verbosity, elide everything after the first line.  */
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1683
1684 /* Read LEN bytes of target memory at address MEMADDR, placing the
1685 results in GDB's memory at MYADDR. Returns either 0 for success or
1686 TARGET_XFER_E_IO if any error occurs.
1687
1688 If an error occurs, no guarantee is made about the contents of the data at
1689 MYADDR. In particular, the caller should not depend upon partial reads
1690 filling the buffer with good data. There is no way for the caller to know
1691 how much good data might have been transfered anyway. Callers that can
1692 deal with partial reads should call target_read (which will retry until
1693 it makes no progress, and then return how much was transferred). */
1694
1695 int
1696 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1697 {
1698 /* Dispatch to the topmost target, not the flattened current_target.
1699 Memory accesses check target->to_has_(all_)memory, and the
1700 flattened target doesn't inherit those. */
1701 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1702 myaddr, memaddr, len) == len)
1703 return 0;
1704 else
1705 return TARGET_XFER_E_IO;
1706 }
1707
1708 /* Like target_read_memory, but specify explicitly that this is a read
1709 from the target's raw memory. That is, this read bypasses the
1710 dcache, breakpoint shadowing, etc. */
1711
1712 int
1713 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1714 {
1715 /* See comment in target_read_memory about why the request starts at
1716 current_target.beneath. */
1717 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1718 myaddr, memaddr, len) == len)
1719 return 0;
1720 else
1721 return TARGET_XFER_E_IO;
1722 }
1723
1724 /* Like target_read_memory, but specify explicitly that this is a read from
1725 the target's stack. This may trigger different cache behavior. */
1726
1727 int
1728 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1729 {
1730 /* See comment in target_read_memory about why the request starts at
1731 current_target.beneath. */
1732 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1733 myaddr, memaddr, len) == len)
1734 return 0;
1735 else
1736 return TARGET_XFER_E_IO;
1737 }
1738
1739 /* Like target_read_memory, but specify explicitly that this is a read from
1740 the target's code. This may trigger different cache behavior. */
1741
1742 int
1743 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1744 {
1745 /* See comment in target_read_memory about why the request starts at
1746 current_target.beneath. */
1747 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1748 myaddr, memaddr, len) == len)
1749 return 0;
1750 else
1751 return TARGET_XFER_E_IO;
1752 }
1753
1754 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1755 Returns either 0 for success or TARGET_XFER_E_IO if any
1756 error occurs. If an error occurs, no guarantee is made about how
1757 much data got written. Callers that can deal with partial writes
1758 should call target_write. */
1759
1760 int
1761 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1762 {
1763 /* See comment in target_read_memory about why the request starts at
1764 current_target.beneath. */
1765 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1766 myaddr, memaddr, len) == len)
1767 return 0;
1768 else
1769 return TARGET_XFER_E_IO;
1770 }
1771
1772 /* Write LEN bytes from MYADDR to target raw memory at address
1773 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1774 if any error occurs. If an error occurs, no guarantee is made
1775 about how much data got written. Callers that can deal with
1776 partial writes should call target_write. */
1777
1778 int
1779 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1780 {
1781 /* See comment in target_read_memory about why the request starts at
1782 current_target.beneath. */
1783 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1784 myaddr, memaddr, len) == len)
1785 return 0;
1786 else
1787 return TARGET_XFER_E_IO;
1788 }
1789
1790 /* Fetch the target's memory map. */
1791
1792 VEC(mem_region_s) *
1793 target_memory_map (void)
1794 {
1795 VEC(mem_region_s) *result;
1796 struct mem_region *last_one, *this_one;
1797 int ix;
1798 struct target_ops *t;
1799
1800 if (targetdebug)
1801 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1802
1803 for (t = current_target.beneath; t != NULL; t = t->beneath)
1804 if (t->to_memory_map != NULL)
1805 break;
1806
1807 if (t == NULL)
1808 return NULL;
1809
1810 result = t->to_memory_map (t);
1811 if (result == NULL)
1812 return NULL;
1813
1814 qsort (VEC_address (mem_region_s, result),
1815 VEC_length (mem_region_s, result),
1816 sizeof (struct mem_region), mem_region_cmp);
1817
1818 /* Check that regions do not overlap. Simultaneously assign
1819 a numbering for the "mem" commands to use to refer to
1820 each region. */
1821 last_one = NULL;
1822 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1823 {
1824 this_one->number = ix;
1825
1826 if (last_one && last_one->hi > this_one->lo)
1827 {
1828 warning (_("Overlapping regions in memory map: ignoring"));
1829 VEC_free (mem_region_s, result);
1830 return NULL;
1831 }
1832 last_one = this_one;
1833 }
1834
1835 return result;
1836 }
1837
1838 void
1839 target_flash_erase (ULONGEST address, LONGEST length)
1840 {
1841 struct target_ops *t;
1842
1843 for (t = current_target.beneath; t != NULL; t = t->beneath)
1844 if (t->to_flash_erase != NULL)
1845 {
1846 if (targetdebug)
1847 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1848 hex_string (address), phex (length, 0));
1849 t->to_flash_erase (t, address, length);
1850 return;
1851 }
1852
1853 tcomplain ();
1854 }
1855
1856 void
1857 target_flash_done (void)
1858 {
1859 struct target_ops *t;
1860
1861 for (t = current_target.beneath; t != NULL; t = t->beneath)
1862 if (t->to_flash_done != NULL)
1863 {
1864 if (targetdebug)
1865 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1866 t->to_flash_done (t);
1867 return;
1868 }
1869
1870 tcomplain ();
1871 }
1872
/* Show hook for "show trust-readonly-sections": report the current
   setting VALUE on FILE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1881
1882 /* More generic transfers. */
1883
1884 static enum target_xfer_status
1885 default_xfer_partial (struct target_ops *ops, enum target_object object,
1886 const char *annex, gdb_byte *readbuf,
1887 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1888 ULONGEST *xfered_len)
1889 {
1890 if (object == TARGET_OBJECT_MEMORY
1891 && ops->deprecated_xfer_memory != NULL)
1892 /* If available, fall back to the target's
1893 "deprecated_xfer_memory" method. */
1894 {
1895 int xfered = -1;
1896
1897 errno = 0;
1898 if (writebuf != NULL)
1899 {
1900 void *buffer = xmalloc (len);
1901 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1902
1903 memcpy (buffer, writebuf, len);
1904 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1905 1/*write*/, NULL, ops);
1906 do_cleanups (cleanup);
1907 }
1908 if (readbuf != NULL)
1909 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1910 0/*read*/, NULL, ops);
1911 if (xfered > 0)
1912 {
1913 *xfered_len = (ULONGEST) xfered;
1914 return TARGET_XFER_E_IO;
1915 }
1916 else if (xfered == 0 && errno == 0)
1917 /* "deprecated_xfer_memory" uses 0, cross checked against
1918 ERRNO as one indication of an error. */
1919 return TARGET_XFER_EOF;
1920 else
1921 return TARGET_XFER_E_IO;
1922 }
1923 else
1924 {
1925 gdb_assert (ops->beneath != NULL);
1926 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1927 readbuf, writebuf, offset, len,
1928 xfered_len);
1929 }
1930 }
1931
1932 /* Target vector read/write partial wrapper functions. */
1933
1934 static enum target_xfer_status
1935 target_read_partial (struct target_ops *ops,
1936 enum target_object object,
1937 const char *annex, gdb_byte *buf,
1938 ULONGEST offset, ULONGEST len,
1939 ULONGEST *xfered_len)
1940 {
1941 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1942 xfered_len);
1943 }
1944
1945 static enum target_xfer_status
1946 target_write_partial (struct target_ops *ops,
1947 enum target_object object,
1948 const char *annex, const gdb_byte *buf,
1949 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1950 {
1951 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1952 xfered_len);
1953 }
1954
1955 /* Wrappers to perform the full transfer. */
1956
1957 /* For docs on target_read see target.h. */
1958
1959 LONGEST
1960 target_read (struct target_ops *ops,
1961 enum target_object object,
1962 const char *annex, gdb_byte *buf,
1963 ULONGEST offset, LONGEST len)
1964 {
1965 LONGEST xfered = 0;
1966
1967 while (xfered < len)
1968 {
1969 ULONGEST xfered_len;
1970 enum target_xfer_status status;
1971
1972 status = target_read_partial (ops, object, annex,
1973 (gdb_byte *) buf + xfered,
1974 offset + xfered, len - xfered,
1975 &xfered_len);
1976
1977 /* Call an observer, notifying them of the xfer progress? */
1978 if (status == TARGET_XFER_EOF)
1979 return xfered;
1980 else if (status == TARGET_XFER_OK)
1981 {
1982 xfered += xfered_len;
1983 QUIT;
1984 }
1985 else
1986 return -1;
1987
1988 }
1989 return len;
1990 }
1991
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning, or end, nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and it seems an unacceptable solution.  Explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  /* [current_begin, current_end) is the maintained "not readable as a
     whole" range that the binary search below narrows down.  */
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;			/* Nonzero: readable part is at the front.  */
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      /* Neither boundary byte is readable; return nothing.  */
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is always the half adjacent to the part already
	 known readable; which side that is depends on FORWARD.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the next
	     iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;		/* Ownership of BUF moves into R.  */
    }
  else
    {
      /* The [current_end, end) range has been read.  */
      LONGEST rlen = end - current_end;

      /* Copy just the readable tail into a right-sized buffer.  */
      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2126
2127 void
2128 free_memory_read_result_vector (void *x)
2129 {
2130 VEC(memory_read_result_s) *v = x;
2131 memory_read_result_s *current;
2132 int ix;
2133
2134 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2135 {
2136 xfree (current->data);
2137 }
2138 VEC_free (memory_read_result_s, v);
2139 }
2140
VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  /* Accumulated readable ranges; each element owns its data buffer.  */
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      /* Bytes from here to the region's end; hi == 0 means the region
	 extends to the end of the address space.
	 NOTE(review): "region->hi - offset" does not account for
	 XFERED, so on iterations after the first RLEN may overstate
	 the space remaining in this region -- confirm against
	 lookup_mem_region's contract.  */
      if (region->hi == 0)
	rlen = len - xfered;
      else
	rlen = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered += rlen;
	}
      else
	{
	  LONGEST to_read = min (len - xfered, rlen);
	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
				      (gdb_byte *) buffer,
				      offset + xfered, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfer <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  Ownership of BUFFER stays here; the
		 helper pushes its own buffer into RESULT.  */
	      xfree (buffer);
	      read_whatever_is_readable (ops, offset + xfered,
					 offset + xfered + to_read, &result);
	      xfered += to_read;
	    }
	  else
	    {
	      /* Record the successfully read range; RESULT takes
		 ownership of BUFFER.  */
	      struct memory_read_result r;
	      r.data = buffer;
	      r.begin = offset + xfered;
	      r.end = r.begin + xfer;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered += xfer;
	    }
	  QUIT;
	}
    }
  return result;
}
2199
2200
2201 /* An alternative to target_write with progress callbacks. */
2202
2203 LONGEST
2204 target_write_with_progress (struct target_ops *ops,
2205 enum target_object object,
2206 const char *annex, const gdb_byte *buf,
2207 ULONGEST offset, LONGEST len,
2208 void (*progress) (ULONGEST, void *), void *baton)
2209 {
2210 LONGEST xfered = 0;
2211
2212 /* Give the progress callback a chance to set up. */
2213 if (progress)
2214 (*progress) (0, baton);
2215
2216 while (xfered < len)
2217 {
2218 ULONGEST xfered_len;
2219 enum target_xfer_status status;
2220
2221 status = target_write_partial (ops, object, annex,
2222 (gdb_byte *) buf + xfered,
2223 offset + xfered, len - xfered,
2224 &xfered_len);
2225
2226 if (status == TARGET_XFER_EOF)
2227 return xfered;
2228 if (TARGET_XFER_STATUS_ERROR_P (status))
2229 return -1;
2230
2231 gdb_assert (status == TARGET_XFER_OK);
2232 if (progress)
2233 (*progress) (xfered_len, baton);
2234
2235 xfered += xfered_len;
2236 QUIT;
2237 }
2238 return len;
2239 }
2240
/* For docs on target_write see target.h.  */

LONGEST
target_write (struct target_ops *ops,
	      enum target_object object,
	      const char *annex, const gdb_byte *buf,
	      ULONGEST offset, LONGEST len)
{
  /* A plain write is a progress-callback write with no callback.  */
  return target_write_with_progress (ops, object, annex, buf, offset, len,
				     NULL, NULL);
}
2252
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_read_alloc; see the declaration of that function for more
   information.  */

static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT.  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      /* Request all the room that remains, minus the PADDING bytes
	 reserved at the end for the caller.  */
      status = target_read_partial (ops, object, annex, &buf[buf_pos],
				    buf_pos, buf_alloc - buf_pos - padding,
				    &xfered_len);

      if (status == TARGET_XFER_EOF)
	{
	  /* Read all there was.  *BUF_P is only set when some data
	     was actually read; an empty object frees the buffer.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}
      else if (status != TARGET_XFER_OK)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return TARGET_XFER_E_IO;
	}

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  Doubling once the
	 buffer is more than half full keeps the next request size
	 above zero.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2315
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  */

LONGEST
target_read_alloc (struct target_ops *ops, enum target_object object,
		   const char *annex, gdb_byte **buf_p)
{
  /* No padding: the caller gets exactly the transferred bytes.  */
  return target_read_alloc_1 (ops, object, annex, buf_p, 0);
}
2326
2327 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2328 returned as a string, allocated using xmalloc. If an error occurs
2329 or the transfer is unsupported, NULL is returned. Empty objects
2330 are returned as allocated but empty strings. A warning is issued
2331 if the result contains any embedded NUL bytes. */
2332
2333 char *
2334 target_read_stralloc (struct target_ops *ops, enum target_object object,
2335 const char *annex)
2336 {
2337 gdb_byte *buffer;
2338 char *bufstr;
2339 LONGEST i, transferred;
2340
2341 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2342 bufstr = (char *) buffer;
2343
2344 if (transferred < 0)
2345 return NULL;
2346
2347 if (transferred == 0)
2348 return xstrdup ("");
2349
2350 bufstr[transferred] = 0;
2351
2352 /* Check for embedded NUL bytes; but allow trailing NULs. */
2353 for (i = strlen (bufstr); i < transferred; i++)
2354 if (bufstr[i] != 0)
2355 {
2356 warning (_("target object %d, annex %s, "
2357 "contained unexpected null characters"),
2358 (int) object, annex ? annex : "(none)");
2359 break;
2360 }
2361
2362 return bufstr;
2363 }
2364
2365 /* Memory transfer methods. */
2366
2367 void
2368 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2369 LONGEST len)
2370 {
2371 /* This method is used to read from an alternate, non-current
2372 target. This read must bypass the overlay support (as symbols
2373 don't match this target), and GDB's internal cache (wrong cache
2374 for this target). */
2375 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2376 != len)
2377 memory_error (TARGET_XFER_E_IO, addr);
2378 }
2379
2380 ULONGEST
2381 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2382 int len, enum bfd_endian byte_order)
2383 {
2384 gdb_byte buf[sizeof (ULONGEST)];
2385
2386 gdb_assert (len <= sizeof (buf));
2387 get_target_memory (ops, addr, buf, len);
2388 return extract_unsigned_integer (buf, len, byte_order);
2389 }
2390
/* See target.h.  */

int
target_insert_breakpoint (struct gdbarch *gdbarch,
			  struct bp_target_info *bp_tgt)
{
  /* Honor the "may-insert-breakpoints" setting; the non-zero return
     value signals failure to the caller.  */
  if (!may_insert_breakpoints)
    {
      warning (_("May not insert breakpoints"));
      return 1;
    }

  return current_target.to_insert_breakpoint (&current_target,
					      gdbarch, bp_tgt);
}
2406
/* See target.h.  */

int
target_remove_breakpoint (struct gdbarch *gdbarch,
			  struct bp_target_info *bp_tgt)
{
  /* This is kind of a weird case to handle, but the permission might
     have been changed after breakpoints were inserted - in which case
     we should just take the user literally and assume that any
     breakpoints should be left in place.  The non-zero return value
     signals failure to the caller.  */
  if (!may_insert_breakpoints)
    {
      warning (_("May not remove breakpoints"));
      return 1;
    }

  return current_target.to_remove_breakpoint (&current_target,
					      gdbarch, bp_tgt);
}
2426
2427 static void
2428 target_info (char *args, int from_tty)
2429 {
2430 struct target_ops *t;
2431 int has_all_mem = 0;
2432
2433 if (symfile_objfile != NULL)
2434 printf_unfiltered (_("Symbols from \"%s\".\n"),
2435 objfile_name (symfile_objfile));
2436
2437 for (t = target_stack; t != NULL; t = t->beneath)
2438 {
2439 if (!(*t->to_has_memory) (t))
2440 continue;
2441
2442 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2443 continue;
2444 if (has_all_mem)
2445 printf_unfiltered (_("\tWhile running this, "
2446 "GDB does not access memory from...\n"));
2447 printf_unfiltered ("%s:\n", t->to_longname);
2448 (t->to_files_info) (t);
2449 has_all_mem = (*t->to_has_all_memory) (t);
2450 }
2451 }
2452
/* This function is called before any new inferior is created, e.g.
   by running a program, attaching, or connecting to a target.
   It cleans up any state from previous invocations which might
   change between runs.  This is a subset of what target_preopen
   resets (things which might change between targets).  */

void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  In that case none of this state is
     per-inferior, so leave it alone.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* Reset cached agent capability state as well (see agent.h).  */
  agent_capability_invalidate ();
}
2493
2494 /* Callback for iterate_over_inferiors. Gets rid of the given
2495 inferior. */
2496
2497 static int
2498 dispose_inferior (struct inferior *inf, void *args)
2499 {
2500 struct thread_info *thread;
2501
2502 thread = any_thread_of_process (inf->pid);
2503 if (thread)
2504 {
2505 switch_to_thread (thread->ptid);
2506
2507 /* Core inferiors actually should be detached, not killed. */
2508 if (target_has_execution)
2509 target_kill ();
2510 else
2511 target_detach (NULL, 0);
2512 }
2513
2514 return 0;
2515 }
2516
2517 /* This is to be called by the open routine before it does
2518 anything. */
2519
2520 void
2521 target_preopen (int from_tty)
2522 {
2523 dont_repeat ();
2524
2525 if (have_inferiors ())
2526 {
2527 if (!from_tty
2528 || !have_live_inferiors ()
2529 || query (_("A program is being debugged already. Kill it? ")))
2530 iterate_over_inferiors (dispose_inferior, NULL);
2531 else
2532 error (_("Program not killed."));
2533 }
2534
2535 /* Calling target_kill may remove the target from the stack. But if
2536 it doesn't (which seems like a win for UDI), remove it now. */
2537 /* Leave the exec target, though. The user may be switching from a
2538 live process to a core of the same program. */
2539 pop_all_targets_above (file_stratum);
2540
2541 target_pre_inferior (from_tty);
2542 }
2543
2544 /* Detach a target after doing deferred register stores. */
2545
2546 void
2547 target_detach (const char *args, int from_tty)
2548 {
2549 struct target_ops* t;
2550
2551 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2552 /* Don't remove global breakpoints here. They're removed on
2553 disconnection from the target. */
2554 ;
2555 else
2556 /* If we're in breakpoints-always-inserted mode, have to remove
2557 them before detaching. */
2558 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2559
2560 prepare_for_detach ();
2561
2562 current_target.to_detach (&current_target, args, from_tty);
2563 if (targetdebug)
2564 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2565 args, from_tty);
2566 }
2567
2568 void
2569 target_disconnect (char *args, int from_tty)
2570 {
2571 struct target_ops *t;
2572
2573 /* If we're in breakpoints-always-inserted mode or if breakpoints
2574 are global across processes, we have to remove them before
2575 disconnecting. */
2576 remove_breakpoints ();
2577
2578 for (t = current_target.beneath; t != NULL; t = t->beneath)
2579 if (t->to_disconnect != NULL)
2580 {
2581 if (targetdebug)
2582 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2583 args, from_tty);
2584 t->to_disconnect (t, args, from_tty);
2585 return;
2586 }
2587
2588 tcomplain ();
2589 }
2590
2591 ptid_t
2592 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2593 {
2594 struct target_ops *t;
2595 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2596 status, options);
2597
2598 if (targetdebug)
2599 {
2600 char *status_string;
2601 char *options_string;
2602
2603 status_string = target_waitstatus_to_string (status);
2604 options_string = target_options_to_string (options);
2605 fprintf_unfiltered (gdb_stdlog,
2606 "target_wait (%d, status, options={%s})"
2607 " = %d, %s\n",
2608 ptid_get_pid (ptid), options_string,
2609 ptid_get_pid (retval), status_string);
2610 xfree (status_string);
2611 xfree (options_string);
2612 }
2613
2614 return retval;
2615 }
2616
2617 char *
2618 target_pid_to_str (ptid_t ptid)
2619 {
2620 struct target_ops *t;
2621
2622 for (t = current_target.beneath; t != NULL; t = t->beneath)
2623 {
2624 if (t->to_pid_to_str != NULL)
2625 return (*t->to_pid_to_str) (t, ptid);
2626 }
2627
2628 return normal_pid_to_str (ptid);
2629 }
2630
/* Return the target-reported name of thread INFO, delegating to the
   current target's to_thread_name method.  */

char *
target_thread_name (struct thread_info *info)
{
  return current_target.to_thread_name (&current_target, info);
}
2636
2637 void
2638 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2639 {
2640 struct target_ops *t;
2641
2642 target_dcache_invalidate ();
2643
2644 current_target.to_resume (&current_target, ptid, step, signal);
2645 if (targetdebug)
2646 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2647 ptid_get_pid (ptid),
2648 step ? "step" : "continue",
2649 gdb_signal_to_name (signal));
2650
2651 registers_changed_ptid (ptid);
2652 set_executing (ptid, 1);
2653 set_running (ptid, 1);
2654 clear_inline_frame_state (ptid);
2655 }
2656
2657 void
2658 target_pass_signals (int numsigs, unsigned char *pass_signals)
2659 {
2660 struct target_ops *t;
2661
2662 for (t = current_target.beneath; t != NULL; t = t->beneath)
2663 {
2664 if (t->to_pass_signals != NULL)
2665 {
2666 if (targetdebug)
2667 {
2668 int i;
2669
2670 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2671 numsigs);
2672
2673 for (i = 0; i < numsigs; i++)
2674 if (pass_signals[i])
2675 fprintf_unfiltered (gdb_stdlog, " %s",
2676 gdb_signal_to_name (i));
2677
2678 fprintf_unfiltered (gdb_stdlog, " })\n");
2679 }
2680
2681 (*t->to_pass_signals) (t, numsigs, pass_signals);
2682 return;
2683 }
2684 }
2685 }
2686
2687 void
2688 target_program_signals (int numsigs, unsigned char *program_signals)
2689 {
2690 struct target_ops *t;
2691
2692 for (t = current_target.beneath; t != NULL; t = t->beneath)
2693 {
2694 if (t->to_program_signals != NULL)
2695 {
2696 if (targetdebug)
2697 {
2698 int i;
2699
2700 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2701 numsigs);
2702
2703 for (i = 0; i < numsigs; i++)
2704 if (program_signals[i])
2705 fprintf_unfiltered (gdb_stdlog, " %s",
2706 gdb_signal_to_name (i));
2707
2708 fprintf_unfiltered (gdb_stdlog, " })\n");
2709 }
2710
2711 (*t->to_program_signals) (t, numsigs, program_signals);
2712 return;
2713 }
2714 }
2715 }
2716
2717 /* Look through the list of possible targets for a target that can
2718 follow forks. */
2719
2720 int
2721 target_follow_fork (int follow_child, int detach_fork)
2722 {
2723 struct target_ops *t;
2724
2725 for (t = current_target.beneath; t != NULL; t = t->beneath)
2726 {
2727 if (t->to_follow_fork != NULL)
2728 {
2729 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2730
2731 if (targetdebug)
2732 fprintf_unfiltered (gdb_stdlog,
2733 "target_follow_fork (%d, %d) = %d\n",
2734 follow_child, detach_fork, retval);
2735 return retval;
2736 }
2737 }
2738
2739 /* Some target returned a fork event, but did not know how to follow it. */
2740 internal_error (__FILE__, __LINE__,
2741 _("could not find a target to follow fork"));
2742 }
2743
2744 void
2745 target_mourn_inferior (void)
2746 {
2747 struct target_ops *t;
2748
2749 for (t = current_target.beneath; t != NULL; t = t->beneath)
2750 {
2751 if (t->to_mourn_inferior != NULL)
2752 {
2753 t->to_mourn_inferior (t);
2754 if (targetdebug)
2755 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2756
2757 /* We no longer need to keep handles on any of the object files.
2758 Make sure to release them to avoid unnecessarily locking any
2759 of them while we're not actually debugging. */
2760 bfd_cache_close_all ();
2761
2762 return;
2763 }
2764 }
2765
2766 internal_error (__FILE__, __LINE__,
2767 _("could not find a target to follow mourn inferior"));
2768 }
2769
2770 /* Look for a target which can describe architectural features, starting
2771 from TARGET. If we find one, return its description. */
2772
2773 const struct target_desc *
2774 target_read_description (struct target_ops *target)
2775 {
2776 struct target_ops *t;
2777
2778 for (t = target; t != NULL; t = t->beneath)
2779 if (t->to_read_description != NULL)
2780 {
2781 const struct target_desc *tdesc;
2782
2783 tdesc = t->to_read_description (t);
2784 if (tdesc)
2785 return tdesc;
2786 }
2787
2788 return NULL;
2789 }
2790
/* The default implementation of to_search_memory.
   This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search in on the
   target side with, for example, gdbserver).  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  It is one chunk
     plus pattern_len - 1 bytes, so a match that straddles two chunks
     is still found.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* Plain malloc (not xmalloc) so allocation failure can be reported
     through error below.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2898
2899 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2900 sequence of bytes in PATTERN with length PATTERN_LEN.
2901
2902 The result is 1 if found, 0 if not found, and -1 if there was an error
2903 requiring halting of the search (e.g. memory read error).
2904 If the pattern is found the address is recorded in FOUND_ADDRP. */
2905
2906 int
2907 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2908 const gdb_byte *pattern, ULONGEST pattern_len,
2909 CORE_ADDR *found_addrp)
2910 {
2911 struct target_ops *t;
2912 int found;
2913
2914 /* We don't use INHERIT to set current_target.to_search_memory,
2915 so we have to scan the target stack and handle targetdebug
2916 ourselves. */
2917
2918 if (targetdebug)
2919 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2920 hex_string (start_addr));
2921
2922 for (t = current_target.beneath; t != NULL; t = t->beneath)
2923 if (t->to_search_memory != NULL)
2924 break;
2925
2926 if (t != NULL)
2927 {
2928 found = t->to_search_memory (t, start_addr, search_space_len,
2929 pattern, pattern_len, found_addrp);
2930 }
2931 else
2932 {
2933 /* If a special version of to_search_memory isn't available, use the
2934 simple version. */
2935 found = simple_search_memory (current_target.beneath,
2936 start_addr, search_space_len,
2937 pattern, pattern_len, found_addrp);
2938 }
2939
2940 if (targetdebug)
2941 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2942
2943 return found;
2944 }
2945
2946 /* Look through the currently pushed targets. If none of them will
2947 be able to restart the currently running process, issue an error
2948 message. */
2949
2950 void
2951 target_require_runnable (void)
2952 {
2953 struct target_ops *t;
2954
2955 for (t = target_stack; t != NULL; t = t->beneath)
2956 {
2957 /* If this target knows how to create a new program, then
2958 assume we will still be able to after killing the current
2959 one. Either killing and mourning will not pop T, or else
2960 find_default_run_target will find it again. */
2961 if (t->to_create_inferior != NULL)
2962 return;
2963
2964 /* Do not worry about thread_stratum targets that can not
2965 create inferiors. Assume they will be pushed again if
2966 necessary, and continue to the process_stratum. */
2967 if (t->to_stratum == thread_stratum
2968 || t->to_stratum == arch_stratum)
2969 continue;
2970
2971 error (_("The \"%s\" target does not support \"run\". "
2972 "Try \"help target\" or \"continue\"."),
2973 t->to_shortname);
2974 }
2975
2976 /* This function is only called if the target is running. In that
2977 case there should have been a process_stratum target and it
2978 should either know how to create inferiors, or not... */
2979 internal_error (__FILE__, __LINE__, _("No targets found"));
2980 }
2981
2982 /* Look through the list of possible targets for a target that can
2983 execute a run or attach command without any other data. This is
2984 used to locate the default process stratum.
2985
2986 If DO_MESG is not NULL, the result is always valid (error() is
2987 called for errors); else, return NULL on error. */
2988
2989 static struct target_ops *
2990 find_default_run_target (char *do_mesg)
2991 {
2992 struct target_ops **t;
2993 struct target_ops *runable = NULL;
2994 int count;
2995
2996 count = 0;
2997
2998 for (t = target_structs; t < target_structs + target_struct_size;
2999 ++t)
3000 {
3001 if ((*t)->to_can_run && target_can_run (*t))
3002 {
3003 runable = *t;
3004 ++count;
3005 }
3006 }
3007
3008 if (count != 1)
3009 {
3010 if (do_mesg)
3011 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3012 else
3013 return NULL;
3014 }
3015
3016 return runable;
3017 }
3018
3019 void
3020 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3021 {
3022 struct target_ops *t;
3023
3024 t = find_default_run_target ("attach");
3025 (t->to_attach) (t, args, from_tty);
3026 return;
3027 }
3028
3029 void
3030 find_default_create_inferior (struct target_ops *ops,
3031 char *exec_file, char *allargs, char **env,
3032 int from_tty)
3033 {
3034 struct target_ops *t;
3035
3036 t = find_default_run_target ("run");
3037 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3038 return;
3039 }
3040
3041 static int
3042 find_default_can_async_p (struct target_ops *ignore)
3043 {
3044 struct target_ops *t;
3045
3046 /* This may be called before the target is pushed on the stack;
3047 look for the default process stratum. If there's none, gdb isn't
3048 configured with a native debugger, and target remote isn't
3049 connected yet. */
3050 t = find_default_run_target (NULL);
3051 if (t && t->to_can_async_p != delegate_can_async_p)
3052 return (t->to_can_async_p) (t);
3053 return 0;
3054 }
3055
3056 static int
3057 find_default_is_async_p (struct target_ops *ignore)
3058 {
3059 struct target_ops *t;
3060
3061 /* This may be called before the target is pushed on the stack;
3062 look for the default process stratum. If there's none, gdb isn't
3063 configured with a native debugger, and target remote isn't
3064 connected yet. */
3065 t = find_default_run_target (NULL);
3066 if (t && t->to_is_async_p != delegate_is_async_p)
3067 return (t->to_is_async_p) (t);
3068 return 0;
3069 }
3070
3071 static int
3072 find_default_supports_non_stop (struct target_ops *self)
3073 {
3074 struct target_ops *t;
3075
3076 t = find_default_run_target (NULL);
3077 if (t && t->to_supports_non_stop)
3078 return (t->to_supports_non_stop) (t);
3079 return 0;
3080 }
3081
3082 int
3083 target_supports_non_stop (void)
3084 {
3085 struct target_ops *t;
3086
3087 for (t = &current_target; t != NULL; t = t->beneath)
3088 if (t->to_supports_non_stop)
3089 return t->to_supports_non_stop (t);
3090
3091 return 0;
3092 }
3093
3094 /* Implement the "info proc" command. */
3095
3096 int
3097 target_info_proc (char *args, enum info_proc_what what)
3098 {
3099 struct target_ops *t;
3100
3101 /* If we're already connected to something that can get us OS
3102 related data, use it. Otherwise, try using the native
3103 target. */
3104 if (current_target.to_stratum >= process_stratum)
3105 t = current_target.beneath;
3106 else
3107 t = find_default_run_target (NULL);
3108
3109 for (; t != NULL; t = t->beneath)
3110 {
3111 if (t->to_info_proc != NULL)
3112 {
3113 t->to_info_proc (t, args, what);
3114
3115 if (targetdebug)
3116 fprintf_unfiltered (gdb_stdlog,
3117 "target_info_proc (\"%s\", %d)\n", args, what);
3118
3119 return 1;
3120 }
3121 }
3122
3123 return 0;
3124 }
3125
3126 static int
3127 find_default_supports_disable_randomization (struct target_ops *self)
3128 {
3129 struct target_ops *t;
3130
3131 t = find_default_run_target (NULL);
3132 if (t && t->to_supports_disable_randomization)
3133 return (t->to_supports_disable_randomization) (t);
3134 return 0;
3135 }
3136
3137 int
3138 target_supports_disable_randomization (void)
3139 {
3140 struct target_ops *t;
3141
3142 for (t = &current_target; t != NULL; t = t->beneath)
3143 if (t->to_supports_disable_randomization)
3144 return t->to_supports_disable_randomization (t);
3145
3146 return 0;
3147 }
3148
3149 char *
3150 target_get_osdata (const char *type)
3151 {
3152 struct target_ops *t;
3153
3154 /* If we're already connected to something that can get us OS
3155 related data, use it. Otherwise, try using the native
3156 target. */
3157 if (current_target.to_stratum >= process_stratum)
3158 t = current_target.beneath;
3159 else
3160 t = find_default_run_target ("get OS data");
3161
3162 if (!t)
3163 return NULL;
3164
3165 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3166 }
3167
3168 /* Determine the current address space of thread PTID. */
3169
3170 struct address_space *
3171 target_thread_address_space (ptid_t ptid)
3172 {
3173 struct address_space *aspace;
3174 struct inferior *inf;
3175 struct target_ops *t;
3176
3177 for (t = current_target.beneath; t != NULL; t = t->beneath)
3178 {
3179 if (t->to_thread_address_space != NULL)
3180 {
3181 aspace = t->to_thread_address_space (t, ptid);
3182 gdb_assert (aspace);
3183
3184 if (targetdebug)
3185 fprintf_unfiltered (gdb_stdlog,
3186 "target_thread_address_space (%s) = %d\n",
3187 target_pid_to_str (ptid),
3188 address_space_num (aspace));
3189 return aspace;
3190 }
3191 }
3192
3193 /* Fall-back to the "main" address space of the inferior. */
3194 inf = find_inferior_pid (ptid_get_pid (ptid));
3195
3196 if (inf == NULL || inf->aspace == NULL)
3197 internal_error (__FILE__, __LINE__,
3198 _("Can't determine the current "
3199 "address space of thread %s\n"),
3200 target_pid_to_str (ptid));
3201
3202 return inf->aspace;
3203 }
3204
3205
3206 /* Target file operations. */
3207
3208 static struct target_ops *
3209 default_fileio_target (void)
3210 {
3211 /* If we're already connected to something that can perform
3212 file I/O, use it. Otherwise, try using the native target. */
3213 if (current_target.to_stratum >= process_stratum)
3214 return current_target.beneath;
3215 else
3216 return find_default_run_target ("file I/O");
3217 }
3218
3219 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3220 target file descriptor, or -1 if an error occurs (and set
3221 *TARGET_ERRNO). */
3222 int
3223 target_fileio_open (const char *filename, int flags, int mode,
3224 int *target_errno)
3225 {
3226 struct target_ops *t;
3227
3228 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3229 {
3230 if (t->to_fileio_open != NULL)
3231 {
3232 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
3233
3234 if (targetdebug)
3235 fprintf_unfiltered (gdb_stdlog,
3236 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3237 filename, flags, mode,
3238 fd, fd != -1 ? 0 : *target_errno);
3239 return fd;
3240 }
3241 }
3242
3243 *target_errno = FILEIO_ENOSYS;
3244 return -1;
3245 }
3246
3247 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3248 Return the number of bytes written, or -1 if an error occurs
3249 (and set *TARGET_ERRNO). */
3250 int
3251 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3252 ULONGEST offset, int *target_errno)
3253 {
3254 struct target_ops *t;
3255
3256 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3257 {
3258 if (t->to_fileio_pwrite != NULL)
3259 {
3260 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
3261 target_errno);
3262
3263 if (targetdebug)
3264 fprintf_unfiltered (gdb_stdlog,
3265 "target_fileio_pwrite (%d,...,%d,%s) "
3266 "= %d (%d)\n",
3267 fd, len, pulongest (offset),
3268 ret, ret != -1 ? 0 : *target_errno);
3269 return ret;
3270 }
3271 }
3272
3273 *target_errno = FILEIO_ENOSYS;
3274 return -1;
3275 }
3276
3277 /* Read up to LEN bytes FD on the target into READ_BUF.
3278 Return the number of bytes read, or -1 if an error occurs
3279 (and set *TARGET_ERRNO). */
3280 int
3281 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3282 ULONGEST offset, int *target_errno)
3283 {
3284 struct target_ops *t;
3285
3286 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3287 {
3288 if (t->to_fileio_pread != NULL)
3289 {
3290 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
3291 target_errno);
3292
3293 if (targetdebug)
3294 fprintf_unfiltered (gdb_stdlog,
3295 "target_fileio_pread (%d,...,%d,%s) "
3296 "= %d (%d)\n",
3297 fd, len, pulongest (offset),
3298 ret, ret != -1 ? 0 : *target_errno);
3299 return ret;
3300 }
3301 }
3302
3303 *target_errno = FILEIO_ENOSYS;
3304 return -1;
3305 }
3306
3307 /* Close FD on the target. Return 0, or -1 if an error occurs
3308 (and set *TARGET_ERRNO). */
3309 int
3310 target_fileio_close (int fd, int *target_errno)
3311 {
3312 struct target_ops *t;
3313
3314 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3315 {
3316 if (t->to_fileio_close != NULL)
3317 {
3318 int ret = t->to_fileio_close (t, fd, target_errno);
3319
3320 if (targetdebug)
3321 fprintf_unfiltered (gdb_stdlog,
3322 "target_fileio_close (%d) = %d (%d)\n",
3323 fd, ret, ret != -1 ? 0 : *target_errno);
3324 return ret;
3325 }
3326 }
3327
3328 *target_errno = FILEIO_ENOSYS;
3329 return -1;
3330 }
3331
3332 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3333 occurs (and set *TARGET_ERRNO). */
3334 int
3335 target_fileio_unlink (const char *filename, int *target_errno)
3336 {
3337 struct target_ops *t;
3338
3339 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3340 {
3341 if (t->to_fileio_unlink != NULL)
3342 {
3343 int ret = t->to_fileio_unlink (t, filename, target_errno);
3344
3345 if (targetdebug)
3346 fprintf_unfiltered (gdb_stdlog,
3347 "target_fileio_unlink (%s) = %d (%d)\n",
3348 filename, ret, ret != -1 ? 0 : *target_errno);
3349 return ret;
3350 }
3351 }
3352
3353 *target_errno = FILEIO_ENOSYS;
3354 return -1;
3355 }
3356
3357 /* Read value of symbolic link FILENAME on the target. Return a
3358 null-terminated string allocated via xmalloc, or NULL if an error
3359 occurs (and set *TARGET_ERRNO). */
3360 char *
3361 target_fileio_readlink (const char *filename, int *target_errno)
3362 {
3363 struct target_ops *t;
3364
3365 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3366 {
3367 if (t->to_fileio_readlink != NULL)
3368 {
3369 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3370
3371 if (targetdebug)
3372 fprintf_unfiltered (gdb_stdlog,
3373 "target_fileio_readlink (%s) = %s (%d)\n",
3374 filename, ret? ret : "(nil)",
3375 ret? 0 : *target_errno);
3376 return ret;
3377 }
3378 }
3379
3380 *target_errno = FILEIO_ENOSYS;
3381 return NULL;
3382 }
3383
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE, discarding any close error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int fd = *(int *) opaque;
  int target_errno;

  /* Best effort: the error code is intentionally ignored.  */
  target_fileio_close (fd, &target_errno);
}
3392
3393 /* Read target file FILENAME. Store the result in *BUF_P and
3394 return the size of the transferred data. PADDING additional bytes are
3395 available in *BUF_P. This is a helper function for
3396 target_fileio_read_alloc; see the declaration of that function for more
3397 information. */
3398
3399 static LONGEST
3400 target_fileio_read_alloc_1 (const char *filename,
3401 gdb_byte **buf_p, int padding)
3402 {
3403 struct cleanup *close_cleanup;
3404 size_t buf_alloc, buf_pos;
3405 gdb_byte *buf;
3406 LONGEST n;
3407 int fd;
3408 int target_errno;
3409
3410 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3411 if (fd == -1)
3412 return -1;
3413
3414 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3415
3416 /* Start by reading up to 4K at a time. The target will throttle
3417 this number down if necessary. */
3418 buf_alloc = 4096;
3419 buf = xmalloc (buf_alloc);
3420 buf_pos = 0;
3421 while (1)
3422 {
3423 n = target_fileio_pread (fd, &buf[buf_pos],
3424 buf_alloc - buf_pos - padding, buf_pos,
3425 &target_errno);
3426 if (n < 0)
3427 {
3428 /* An error occurred. */
3429 do_cleanups (close_cleanup);
3430 xfree (buf);
3431 return -1;
3432 }
3433 else if (n == 0)
3434 {
3435 /* Read all there was. */
3436 do_cleanups (close_cleanup);
3437 if (buf_pos == 0)
3438 xfree (buf);
3439 else
3440 *buf_p = buf;
3441 return buf_pos;
3442 }
3443
3444 buf_pos += n;
3445
3446 /* If the buffer is filling up, expand it. */
3447 if (buf_alloc < buf_pos * 2)
3448 {
3449 buf_alloc *= 2;
3450 buf = xrealloc (buf, buf_alloc);
3451 }
3452
3453 QUIT;
3454 }
3455 }
3456
3457 /* Read target file FILENAME. Store the result in *BUF_P and return
3458 the size of the transferred data. See the declaration in "target.h"
3459 function for more information about the return value. */
3460
3461 LONGEST
3462 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3463 {
3464 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3465 }
3466
3467 /* Read target file FILENAME. The result is NUL-terminated and
3468 returned as a string, allocated using xmalloc. If an error occurs
3469 or the transfer is unsupported, NULL is returned. Empty objects
3470 are returned as allocated but empty strings. A warning is issued
3471 if the result contains any embedded NUL bytes. */
3472
3473 char *
3474 target_fileio_read_stralloc (const char *filename)
3475 {
3476 gdb_byte *buffer;
3477 char *bufstr;
3478 LONGEST i, transferred;
3479
3480 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3481 bufstr = (char *) buffer;
3482
3483 if (transferred < 0)
3484 return NULL;
3485
3486 if (transferred == 0)
3487 return xstrdup ("");
3488
3489 bufstr[transferred] = 0;
3490
3491 /* Check for embedded NUL bytes; but allow trailing NULs. */
3492 for (i = strlen (bufstr); i < transferred; i++)
3493 if (bufstr[i] != 0)
3494 {
3495 warning (_("target file %s "
3496 "contained unexpected null characters"),
3497 filename);
3498 break;
3499 }
3500
3501 return bufstr;
3502 }
3503
3504
3505 static int
3506 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3507 CORE_ADDR addr, int len)
3508 {
3509 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3510 }
3511
3512 static int
3513 default_watchpoint_addr_within_range (struct target_ops *target,
3514 CORE_ADDR addr,
3515 CORE_ADDR start, int length)
3516 {
3517 return addr >= start && addr < start + length;
3518 }
3519
3520 static struct gdbarch *
3521 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3522 {
3523 return target_gdbarch ();
3524 }
3525
/* Trivial callbacks used to populate target vector slots that should
   answer a constant.  They are installed through function-pointer
   casts, so their parameter lists are intentionally unspecified.  */

/* Always answer 0.  */

static int
return_zero (void)
{
  return 0;
}

/* Always answer -1.  */

static int
return_minus_one (void)
{
  return -1;
}

/* Always answer a null pointer.  */

static void *
return_null (void)
{
  return 0;
}
3543
3544 /*
3545 * Find the next target down the stack from the specified target.
3546 */
3547
3548 struct target_ops *
3549 find_target_beneath (struct target_ops *t)
3550 {
3551 return t->beneath;
3552 }
3553
3554 /* See target.h. */
3555
3556 struct target_ops *
3557 find_target_at (enum strata stratum)
3558 {
3559 struct target_ops *t;
3560
3561 for (t = current_target.beneath; t != NULL; t = t->beneath)
3562 if (t->to_stratum == stratum)
3563 return t;
3564
3565 return NULL;
3566 }
3567
3568 \f
3569 /* The inferior process has died. Long live the inferior! */
3570
3571 void
3572 generic_mourn_inferior (void)
3573 {
3574 ptid_t ptid;
3575
3576 ptid = inferior_ptid;
3577 inferior_ptid = null_ptid;
3578
3579 /* Mark breakpoints uninserted in case something tries to delete a
3580 breakpoint while we delete the inferior's threads (which would
3581 fail, since the inferior is long gone). */
3582 mark_breakpoints_out ();
3583
3584 if (!ptid_equal (ptid, null_ptid))
3585 {
3586 int pid = ptid_get_pid (ptid);
3587 exit_inferior (pid);
3588 }
3589
3590 /* Note this wipes step-resume breakpoints, so needs to be done
3591 after exit_inferior, which ends up referencing the step-resume
3592 breakpoints through clear_thread_inferior_resources. */
3593 breakpoint_init_inferior (inf_exited);
3594
3595 registers_changed ();
3596
3597 reopen_exec_file ();
3598 reinit_frame_cache ();
3599
3600 if (deprecated_detach_hook)
3601 deprecated_detach_hook ();
3602 }
3603 \f
3604 /* Convert a normal process ID to a string. Returns the string in a
3605 static buffer. */
3606
3607 char *
3608 normal_pid_to_str (ptid_t ptid)
3609 {
3610 static char buf[32];
3611
3612 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3613 return buf;
3614 }
3615
3616 static char *
3617 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3618 {
3619 return normal_pid_to_str (ptid);
3620 }
3621
3622 /* Error-catcher for target_find_memory_regions. */
3623 static int
3624 dummy_find_memory_regions (struct target_ops *self,
3625 find_memory_region_ftype ignore1, void *ignore2)
3626 {
3627 error (_("Command not implemented for this target."));
3628 return 0;
3629 }
3630
3631 /* Error-catcher for target_make_corefile_notes. */
3632 static char *
3633 dummy_make_corefile_notes (struct target_ops *self,
3634 bfd *ignore1, int *ignore2)
3635 {
3636 error (_("Command not implemented for this target."));
3637 return NULL;
3638 }
3639
3640 /* Set up the handful of non-empty slots needed by the dummy target
3641 vector. */
3642
3643 static void
3644 init_dummy_target (void)
3645 {
3646 dummy_target.to_shortname = "None";
3647 dummy_target.to_longname = "None";
3648 dummy_target.to_doc = "";
3649 dummy_target.to_create_inferior = find_default_create_inferior;
3650 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3651 dummy_target.to_supports_disable_randomization
3652 = find_default_supports_disable_randomization;
3653 dummy_target.to_pid_to_str = dummy_pid_to_str;
3654 dummy_target.to_stratum = dummy_stratum;
3655 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3656 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3657 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3658 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3659 dummy_target.to_has_execution
3660 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3661 dummy_target.to_magic = OPS_MAGIC;
3662
3663 install_dummy_methods (&dummy_target);
3664 }
3665 \f
3666 static void
3667 debug_to_open (char *args, int from_tty)
3668 {
3669 debug_target.to_open (args, from_tty);
3670
3671 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3672 }
3673
3674 void
3675 target_close (struct target_ops *targ)
3676 {
3677 gdb_assert (!target_is_pushed (targ));
3678
3679 if (targ->to_xclose != NULL)
3680 targ->to_xclose (targ);
3681 else if (targ->to_close != NULL)
3682 targ->to_close (targ);
3683
3684 if (targetdebug)
3685 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3686 }
3687
3688 void
3689 target_attach (char *args, int from_tty)
3690 {
3691 current_target.to_attach (&current_target, args, from_tty);
3692 if (targetdebug)
3693 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3694 args, from_tty);
3695 }
3696
3697 int
3698 target_thread_alive (ptid_t ptid)
3699 {
3700 struct target_ops *t;
3701
3702 for (t = current_target.beneath; t != NULL; t = t->beneath)
3703 {
3704 if (t->to_thread_alive != NULL)
3705 {
3706 int retval;
3707
3708 retval = t->to_thread_alive (t, ptid);
3709 if (targetdebug)
3710 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3711 ptid_get_pid (ptid), retval);
3712
3713 return retval;
3714 }
3715 }
3716
3717 return 0;
3718 }
3719
3720 void
3721 target_find_new_threads (void)
3722 {
3723 struct target_ops *t;
3724
3725 for (t = current_target.beneath; t != NULL; t = t->beneath)
3726 {
3727 if (t->to_find_new_threads != NULL)
3728 {
3729 t->to_find_new_threads (t);
3730 if (targetdebug)
3731 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3732
3733 return;
3734 }
3735 }
3736 }
3737
3738 void
3739 target_stop (ptid_t ptid)
3740 {
3741 if (!may_stop)
3742 {
3743 warning (_("May not interrupt or stop the target, ignoring attempt"));
3744 return;
3745 }
3746
3747 (*current_target.to_stop) (&current_target, ptid);
3748 }
3749
3750 static void
3751 debug_to_post_attach (struct target_ops *self, int pid)
3752 {
3753 debug_target.to_post_attach (&debug_target, pid);
3754
3755 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3756 }
3757
3758 /* Concatenate ELEM to LIST, a comma separate list, and return the
3759 result. The LIST incoming argument is released. */
3760
3761 static char *
3762 str_comma_list_concat_elem (char *list, const char *elem)
3763 {
3764 if (list == NULL)
3765 return xstrdup (elem);
3766 else
3767 return reconcat (list, list, ", ", elem, (char *) NULL);
3768 }
3769
3770 /* Helper for target_options_to_string. If OPT is present in
3771 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3772 Returns the new resulting string. OPT is removed from
3773 TARGET_OPTIONS. */
3774
3775 static char *
3776 do_option (int *target_options, char *ret,
3777 int opt, char *opt_str)
3778 {
3779 if ((*target_options & opt) != 0)
3780 {
3781 ret = str_comma_list_concat_elem (ret, opt_str);
3782 *target_options &= ~opt;
3783 }
3784
3785 return ret;
3786 }
3787
3788 char *
3789 target_options_to_string (int target_options)
3790 {
3791 char *ret = NULL;
3792
3793 #define DO_TARG_OPTION(OPT) \
3794 ret = do_option (&target_options, ret, OPT, #OPT)
3795
3796 DO_TARG_OPTION (TARGET_WNOHANG);
3797
3798 if (target_options != 0)
3799 ret = str_comma_list_concat_elem (ret, "unknown???");
3800
3801 if (ret == NULL)
3802 ret = xstrdup ("");
3803 return ret;
3804 }
3805
3806 static void
3807 debug_print_register (const char * func,
3808 struct regcache *regcache, int regno)
3809 {
3810 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3811
3812 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3813 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3814 && gdbarch_register_name (gdbarch, regno) != NULL
3815 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3816 fprintf_unfiltered (gdb_stdlog, "(%s)",
3817 gdbarch_register_name (gdbarch, regno));
3818 else
3819 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3820 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3821 {
3822 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3823 int i, size = register_size (gdbarch, regno);
3824 gdb_byte buf[MAX_REGISTER_SIZE];
3825
3826 regcache_raw_collect (regcache, regno, buf);
3827 fprintf_unfiltered (gdb_stdlog, " = ");
3828 for (i = 0; i < size; i++)
3829 {
3830 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3831 }
3832 if (size <= sizeof (LONGEST))
3833 {
3834 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3835
3836 fprintf_unfiltered (gdb_stdlog, " %s %s",
3837 core_addr_to_string_nz (val), plongest (val));
3838 }
3839 }
3840 fprintf_unfiltered (gdb_stdlog, "\n");
3841 }
3842
3843 void
3844 target_fetch_registers (struct regcache *regcache, int regno)
3845 {
3846 struct target_ops *t;
3847
3848 for (t = current_target.beneath; t != NULL; t = t->beneath)
3849 {
3850 if (t->to_fetch_registers != NULL)
3851 {
3852 t->to_fetch_registers (t, regcache, regno);
3853 if (targetdebug)
3854 debug_print_register ("target_fetch_registers", regcache, regno);
3855 return;
3856 }
3857 }
3858 }
3859
3860 void
3861 target_store_registers (struct regcache *regcache, int regno)
3862 {
3863 struct target_ops *t;
3864
3865 if (!may_write_registers)
3866 error (_("Writing to registers is not allowed (regno %d)"), regno);
3867
3868 current_target.to_store_registers (&current_target, regcache, regno);
3869 if (targetdebug)
3870 {
3871 debug_print_register ("target_store_registers", regcache, regno);
3872 }
3873 }
3874
3875 int
3876 target_core_of_thread (ptid_t ptid)
3877 {
3878 struct target_ops *t;
3879
3880 for (t = current_target.beneath; t != NULL; t = t->beneath)
3881 {
3882 if (t->to_core_of_thread != NULL)
3883 {
3884 int retval = t->to_core_of_thread (t, ptid);
3885
3886 if (targetdebug)
3887 fprintf_unfiltered (gdb_stdlog,
3888 "target_core_of_thread (%d) = %d\n",
3889 ptid_get_pid (ptid), retval);
3890 return retval;
3891 }
3892 }
3893
3894 return -1;
3895 }
3896
3897 int
3898 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3899 {
3900 struct target_ops *t;
3901
3902 for (t = current_target.beneath; t != NULL; t = t->beneath)
3903 {
3904 if (t->to_verify_memory != NULL)
3905 {
3906 int retval = t->to_verify_memory (t, data, memaddr, size);
3907
3908 if (targetdebug)
3909 fprintf_unfiltered (gdb_stdlog,
3910 "target_verify_memory (%s, %s) = %d\n",
3911 paddress (target_gdbarch (), memaddr),
3912 pulongest (size),
3913 retval);
3914 return retval;
3915 }
3916 }
3917
3918 tcomplain ();
3919 }
3920
3921 /* The documentation for this function is in its prototype declaration in
3922 target.h. */
3923
3924 int
3925 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3926 {
3927 struct target_ops *t;
3928
3929 for (t = current_target.beneath; t != NULL; t = t->beneath)
3930 if (t->to_insert_mask_watchpoint != NULL)
3931 {
3932 int ret;
3933
3934 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
3935
3936 if (targetdebug)
3937 fprintf_unfiltered (gdb_stdlog, "\
3938 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3939 core_addr_to_string (addr),
3940 core_addr_to_string (mask), rw, ret);
3941
3942 return ret;
3943 }
3944
3945 return 1;
3946 }
3947
3948 /* The documentation for this function is in its prototype declaration in
3949 target.h. */
3950
3951 int
3952 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3953 {
3954 struct target_ops *t;
3955
3956 for (t = current_target.beneath; t != NULL; t = t->beneath)
3957 if (t->to_remove_mask_watchpoint != NULL)
3958 {
3959 int ret;
3960
3961 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
3962
3963 if (targetdebug)
3964 fprintf_unfiltered (gdb_stdlog, "\
3965 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
3966 core_addr_to_string (addr),
3967 core_addr_to_string (mask), rw, ret);
3968
3969 return ret;
3970 }
3971
3972 return 1;
3973 }
3974
3975 /* The documentation for this function is in its prototype declaration
3976 in target.h. */
3977
3978 int
3979 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
3980 {
3981 struct target_ops *t;
3982
3983 for (t = current_target.beneath; t != NULL; t = t->beneath)
3984 if (t->to_masked_watch_num_registers != NULL)
3985 return t->to_masked_watch_num_registers (t, addr, mask);
3986
3987 return -1;
3988 }
3989
3990 /* The documentation for this function is in its prototype declaration
3991 in target.h. */
3992
3993 int
3994 target_ranged_break_num_registers (void)
3995 {
3996 struct target_ops *t;
3997
3998 for (t = current_target.beneath; t != NULL; t = t->beneath)
3999 if (t->to_ranged_break_num_registers != NULL)
4000 return t->to_ranged_break_num_registers (t);
4001
4002 return -1;
4003 }
4004
4005 /* See target.h. */
4006
4007 struct btrace_target_info *
4008 target_enable_btrace (ptid_t ptid)
4009 {
4010 struct target_ops *t;
4011
4012 for (t = current_target.beneath; t != NULL; t = t->beneath)
4013 if (t->to_enable_btrace != NULL)
4014 return t->to_enable_btrace (t, ptid);
4015
4016 tcomplain ();
4017 return NULL;
4018 }
4019
4020 /* See target.h. */
4021
4022 void
4023 target_disable_btrace (struct btrace_target_info *btinfo)
4024 {
4025 struct target_ops *t;
4026
4027 for (t = current_target.beneath; t != NULL; t = t->beneath)
4028 if (t->to_disable_btrace != NULL)
4029 {
4030 t->to_disable_btrace (t, btinfo);
4031 return;
4032 }
4033
4034 tcomplain ();
4035 }
4036
4037 /* See target.h. */
4038
4039 void
4040 target_teardown_btrace (struct btrace_target_info *btinfo)
4041 {
4042 struct target_ops *t;
4043
4044 for (t = current_target.beneath; t != NULL; t = t->beneath)
4045 if (t->to_teardown_btrace != NULL)
4046 {
4047 t->to_teardown_btrace (t, btinfo);
4048 return;
4049 }
4050
4051 tcomplain ();
4052 }
4053
4054 /* See target.h. */
4055
4056 enum btrace_error
4057 target_read_btrace (VEC (btrace_block_s) **btrace,
4058 struct btrace_target_info *btinfo,
4059 enum btrace_read_type type)
4060 {
4061 struct target_ops *t;
4062
4063 for (t = current_target.beneath; t != NULL; t = t->beneath)
4064 if (t->to_read_btrace != NULL)
4065 return t->to_read_btrace (t, btrace, btinfo, type);
4066
4067 tcomplain ();
4068 return BTRACE_ERR_NOT_SUPPORTED;
4069 }
4070
4071 /* See target.h. */
4072
4073 void
4074 target_stop_recording (void)
4075 {
4076 struct target_ops *t;
4077
4078 for (t = current_target.beneath; t != NULL; t = t->beneath)
4079 if (t->to_stop_recording != NULL)
4080 {
4081 t->to_stop_recording (t);
4082 return;
4083 }
4084
4085 /* This is optional. */
4086 }
4087
4088 /* See target.h. */
4089
4090 void
4091 target_info_record (void)
4092 {
4093 struct target_ops *t;
4094
4095 for (t = current_target.beneath; t != NULL; t = t->beneath)
4096 if (t->to_info_record != NULL)
4097 {
4098 t->to_info_record (t);
4099 return;
4100 }
4101
4102 tcomplain ();
4103 }
4104
4105 /* See target.h. */
4106
4107 void
4108 target_save_record (const char *filename)
4109 {
4110 struct target_ops *t;
4111
4112 for (t = current_target.beneath; t != NULL; t = t->beneath)
4113 if (t->to_save_record != NULL)
4114 {
4115 t->to_save_record (t, filename);
4116 return;
4117 }
4118
4119 tcomplain ();
4120 }
4121
4122 /* See target.h. */
4123
4124 int
4125 target_supports_delete_record (void)
4126 {
4127 struct target_ops *t;
4128
4129 for (t = current_target.beneath; t != NULL; t = t->beneath)
4130 if (t->to_delete_record != NULL)
4131 return 1;
4132
4133 return 0;
4134 }
4135
4136 /* See target.h. */
4137
4138 void
4139 target_delete_record (void)
4140 {
4141 struct target_ops *t;
4142
4143 for (t = current_target.beneath; t != NULL; t = t->beneath)
4144 if (t->to_delete_record != NULL)
4145 {
4146 t->to_delete_record (t);
4147 return;
4148 }
4149
4150 tcomplain ();
4151 }
4152
4153 /* See target.h. */
4154
4155 int
4156 target_record_is_replaying (void)
4157 {
4158 struct target_ops *t;
4159
4160 for (t = current_target.beneath; t != NULL; t = t->beneath)
4161 if (t->to_record_is_replaying != NULL)
4162 return t->to_record_is_replaying (t);
4163
4164 return 0;
4165 }
4166
4167 /* See target.h. */
4168
4169 void
4170 target_goto_record_begin (void)
4171 {
4172 struct target_ops *t;
4173
4174 for (t = current_target.beneath; t != NULL; t = t->beneath)
4175 if (t->to_goto_record_begin != NULL)
4176 {
4177 t->to_goto_record_begin (t);
4178 return;
4179 }
4180
4181 tcomplain ();
4182 }
4183
4184 /* See target.h. */
4185
4186 void
4187 target_goto_record_end (void)
4188 {
4189 struct target_ops *t;
4190
4191 for (t = current_target.beneath; t != NULL; t = t->beneath)
4192 if (t->to_goto_record_end != NULL)
4193 {
4194 t->to_goto_record_end (t);
4195 return;
4196 }
4197
4198 tcomplain ();
4199 }
4200
4201 /* See target.h. */
4202
4203 void
4204 target_goto_record (ULONGEST insn)
4205 {
4206 struct target_ops *t;
4207
4208 for (t = current_target.beneath; t != NULL; t = t->beneath)
4209 if (t->to_goto_record != NULL)
4210 {
4211 t->to_goto_record (t, insn);
4212 return;
4213 }
4214
4215 tcomplain ();
4216 }
4217
4218 /* See target.h. */
4219
4220 void
4221 target_insn_history (int size, int flags)
4222 {
4223 struct target_ops *t;
4224
4225 for (t = current_target.beneath; t != NULL; t = t->beneath)
4226 if (t->to_insn_history != NULL)
4227 {
4228 t->to_insn_history (t, size, flags);
4229 return;
4230 }
4231
4232 tcomplain ();
4233 }
4234
4235 /* See target.h. */
4236
4237 void
4238 target_insn_history_from (ULONGEST from, int size, int flags)
4239 {
4240 struct target_ops *t;
4241
4242 for (t = current_target.beneath; t != NULL; t = t->beneath)
4243 if (t->to_insn_history_from != NULL)
4244 {
4245 t->to_insn_history_from (t, from, size, flags);
4246 return;
4247 }
4248
4249 tcomplain ();
4250 }
4251
4252 /* See target.h. */
4253
4254 void
4255 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4256 {
4257 struct target_ops *t;
4258
4259 for (t = current_target.beneath; t != NULL; t = t->beneath)
4260 if (t->to_insn_history_range != NULL)
4261 {
4262 t->to_insn_history_range (t, begin, end, flags);
4263 return;
4264 }
4265
4266 tcomplain ();
4267 }
4268
4269 /* See target.h. */
4270
4271 void
4272 target_call_history (int size, int flags)
4273 {
4274 struct target_ops *t;
4275
4276 for (t = current_target.beneath; t != NULL; t = t->beneath)
4277 if (t->to_call_history != NULL)
4278 {
4279 t->to_call_history (t, size, flags);
4280 return;
4281 }
4282
4283 tcomplain ();
4284 }
4285
4286 /* See target.h. */
4287
4288 void
4289 target_call_history_from (ULONGEST begin, int size, int flags)
4290 {
4291 struct target_ops *t;
4292
4293 for (t = current_target.beneath; t != NULL; t = t->beneath)
4294 if (t->to_call_history_from != NULL)
4295 {
4296 t->to_call_history_from (t, begin, size, flags);
4297 return;
4298 }
4299
4300 tcomplain ();
4301 }
4302
4303 /* See target.h. */
4304
4305 void
4306 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4307 {
4308 struct target_ops *t;
4309
4310 for (t = current_target.beneath; t != NULL; t = t->beneath)
4311 if (t->to_call_history_range != NULL)
4312 {
4313 t->to_call_history_range (t, begin, end, flags);
4314 return;
4315 }
4316
4317 tcomplain ();
4318 }
4319
4320 static void
4321 debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
4322 {
4323 debug_target.to_prepare_to_store (&debug_target, regcache);
4324
4325 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4326 }
4327
4328 /* See target.h. */
4329
4330 const struct frame_unwind *
4331 target_get_unwinder (void)
4332 {
4333 struct target_ops *t;
4334
4335 for (t = current_target.beneath; t != NULL; t = t->beneath)
4336 if (t->to_get_unwinder != NULL)
4337 return t->to_get_unwinder;
4338
4339 return NULL;
4340 }
4341
4342 /* See target.h. */
4343
4344 const struct frame_unwind *
4345 target_get_tailcall_unwinder (void)
4346 {
4347 struct target_ops *t;
4348
4349 for (t = current_target.beneath; t != NULL; t = t->beneath)
4350 if (t->to_get_tailcall_unwinder != NULL)
4351 return t->to_get_tailcall_unwinder;
4352
4353 return NULL;
4354 }
4355
4356 /* See target.h. */
4357
4358 CORE_ADDR
4359 forward_target_decr_pc_after_break (struct target_ops *ops,
4360 struct gdbarch *gdbarch)
4361 {
4362 for (; ops != NULL; ops = ops->beneath)
4363 if (ops->to_decr_pc_after_break != NULL)
4364 return ops->to_decr_pc_after_break (ops, gdbarch);
4365
4366 return gdbarch_decr_pc_after_break (gdbarch);
4367 }
4368
4369 /* See target.h. */
4370
4371 CORE_ADDR
4372 target_decr_pc_after_break (struct gdbarch *gdbarch)
4373 {
4374 return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
4375 }
4376
4377 static int
4378 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4379 int write, struct mem_attrib *attrib,
4380 struct target_ops *target)
4381 {
4382 int retval;
4383
4384 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4385 attrib, target);
4386
4387 fprintf_unfiltered (gdb_stdlog,
4388 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4389 paddress (target_gdbarch (), memaddr), len,
4390 write ? "write" : "read", retval);
4391
4392 if (retval > 0)
4393 {
4394 int i;
4395
4396 fputs_unfiltered (", bytes =", gdb_stdlog);
4397 for (i = 0; i < retval; i++)
4398 {
4399 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4400 {
4401 if (targetdebug < 2 && i > 0)
4402 {
4403 fprintf_unfiltered (gdb_stdlog, " ...");
4404 break;
4405 }
4406 fprintf_unfiltered (gdb_stdlog, "\n");
4407 }
4408
4409 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4410 }
4411 }
4412
4413 fputc_unfiltered ('\n', gdb_stdlog);
4414
4415 return retval;
4416 }
4417
4418 static void
4419 debug_to_files_info (struct target_ops *target)
4420 {
4421 debug_target.to_files_info (target);
4422
4423 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4424 }
4425
4426 static int
4427 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4428 struct bp_target_info *bp_tgt)
4429 {
4430 int retval;
4431
4432 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4433
4434 fprintf_unfiltered (gdb_stdlog,
4435 "target_insert_breakpoint (%s, xxx) = %ld\n",
4436 core_addr_to_string (bp_tgt->placed_address),
4437 (unsigned long) retval);
4438 return retval;
4439 }
4440
4441 static int
4442 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4443 struct bp_target_info *bp_tgt)
4444 {
4445 int retval;
4446
4447 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4448
4449 fprintf_unfiltered (gdb_stdlog,
4450 "target_remove_breakpoint (%s, xxx) = %ld\n",
4451 core_addr_to_string (bp_tgt->placed_address),
4452 (unsigned long) retval);
4453 return retval;
4454 }
4455
4456 static int
4457 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4458 int type, int cnt, int from_tty)
4459 {
4460 int retval;
4461
4462 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4463 type, cnt, from_tty);
4464
4465 fprintf_unfiltered (gdb_stdlog,
4466 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4467 (unsigned long) type,
4468 (unsigned long) cnt,
4469 (unsigned long) from_tty,
4470 (unsigned long) retval);
4471 return retval;
4472 }
4473
4474 static int
4475 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4476 CORE_ADDR addr, int len)
4477 {
4478 CORE_ADDR retval;
4479
4480 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4481 addr, len);
4482
4483 fprintf_unfiltered (gdb_stdlog,
4484 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4485 core_addr_to_string (addr), (unsigned long) len,
4486 core_addr_to_string (retval));
4487 return retval;
4488 }
4489
4490 static int
4491 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4492 CORE_ADDR addr, int len, int rw,
4493 struct expression *cond)
4494 {
4495 int retval;
4496
4497 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4498 addr, len,
4499 rw, cond);
4500
4501 fprintf_unfiltered (gdb_stdlog,
4502 "target_can_accel_watchpoint_condition "
4503 "(%s, %d, %d, %s) = %ld\n",
4504 core_addr_to_string (addr), len, rw,
4505 host_address_to_string (cond), (unsigned long) retval);
4506 return retval;
4507 }
4508
4509 static int
4510 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4511 {
4512 int retval;
4513
4514 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4515
4516 fprintf_unfiltered (gdb_stdlog,
4517 "target_stopped_by_watchpoint () = %ld\n",
4518 (unsigned long) retval);
4519 return retval;
4520 }
4521
4522 static int
4523 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4524 {
4525 int retval;
4526
4527 retval = debug_target.to_stopped_data_address (target, addr);
4528
4529 fprintf_unfiltered (gdb_stdlog,
4530 "target_stopped_data_address ([%s]) = %ld\n",
4531 core_addr_to_string (*addr),
4532 (unsigned long)retval);
4533 return retval;
4534 }
4535
4536 static int
4537 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4538 CORE_ADDR addr,
4539 CORE_ADDR start, int length)
4540 {
4541 int retval;
4542
4543 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4544 start, length);
4545
4546 fprintf_filtered (gdb_stdlog,
4547 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4548 core_addr_to_string (addr), core_addr_to_string (start),
4549 length, retval);
4550 return retval;
4551 }
4552
4553 static int
4554 debug_to_insert_hw_breakpoint (struct target_ops *self,
4555 struct gdbarch *gdbarch,
4556 struct bp_target_info *bp_tgt)
4557 {
4558 int retval;
4559
4560 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4561 gdbarch, bp_tgt);
4562
4563 fprintf_unfiltered (gdb_stdlog,
4564 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4565 core_addr_to_string (bp_tgt->placed_address),
4566 (unsigned long) retval);
4567 return retval;
4568 }
4569
4570 static int
4571 debug_to_remove_hw_breakpoint (struct target_ops *self,
4572 struct gdbarch *gdbarch,
4573 struct bp_target_info *bp_tgt)
4574 {
4575 int retval;
4576
4577 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4578 gdbarch, bp_tgt);
4579
4580 fprintf_unfiltered (gdb_stdlog,
4581 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4582 core_addr_to_string (bp_tgt->placed_address),
4583 (unsigned long) retval);
4584 return retval;
4585 }
4586
4587 static int
4588 debug_to_insert_watchpoint (struct target_ops *self,
4589 CORE_ADDR addr, int len, int type,
4590 struct expression *cond)
4591 {
4592 int retval;
4593
4594 retval = debug_target.to_insert_watchpoint (&debug_target,
4595 addr, len, type, cond);
4596
4597 fprintf_unfiltered (gdb_stdlog,
4598 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4599 core_addr_to_string (addr), len, type,
4600 host_address_to_string (cond), (unsigned long) retval);
4601 return retval;
4602 }
4603
4604 static int
4605 debug_to_remove_watchpoint (struct target_ops *self,
4606 CORE_ADDR addr, int len, int type,
4607 struct expression *cond)
4608 {
4609 int retval;
4610
4611 retval = debug_target.to_remove_watchpoint (&debug_target,
4612 addr, len, type, cond);
4613
4614 fprintf_unfiltered (gdb_stdlog,
4615 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4616 core_addr_to_string (addr), len, type,
4617 host_address_to_string (cond), (unsigned long) retval);
4618 return retval;
4619 }
4620
4621 static void
4622 debug_to_terminal_init (struct target_ops *self)
4623 {
4624 debug_target.to_terminal_init (&debug_target);
4625
4626 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4627 }
4628
4629 static void
4630 debug_to_terminal_inferior (struct target_ops *self)
4631 {
4632 debug_target.to_terminal_inferior (&debug_target);
4633
4634 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4635 }
4636
4637 static void
4638 debug_to_terminal_ours_for_output (struct target_ops *self)
4639 {
4640 debug_target.to_terminal_ours_for_output (&debug_target);
4641
4642 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4643 }
4644
4645 static void
4646 debug_to_terminal_ours (struct target_ops *self)
4647 {
4648 debug_target.to_terminal_ours (&debug_target);
4649
4650 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4651 }
4652
4653 static void
4654 debug_to_terminal_save_ours (struct target_ops *self)
4655 {
4656 debug_target.to_terminal_save_ours (&debug_target);
4657
4658 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4659 }
4660
4661 static void
4662 debug_to_terminal_info (struct target_ops *self,
4663 const char *arg, int from_tty)
4664 {
4665 debug_target.to_terminal_info (&debug_target, arg, from_tty);
4666
4667 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4668 from_tty);
4669 }
4670
4671 static void
4672 debug_to_load (struct target_ops *self, char *args, int from_tty)
4673 {
4674 debug_target.to_load (&debug_target, args, from_tty);
4675
4676 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4677 }
4678
4679 static void
4680 debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4681 {
4682 debug_target.to_post_startup_inferior (&debug_target, ptid);
4683
4684 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4685 ptid_get_pid (ptid));
4686 }
4687
4688 static int
4689 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4690 {
4691 int retval;
4692
4693 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4694
4695 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4696 pid, retval);
4697
4698 return retval;
4699 }
4700
4701 static int
4702 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4703 {
4704 int retval;
4705
4706 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4707
4708 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4709 pid, retval);
4710
4711 return retval;
4712 }
4713
4714 static int
4715 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4716 {
4717 int retval;
4718
4719 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4720
4721 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4722 pid, retval);
4723
4724 return retval;
4725 }
4726
4727 static int
4728 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4729 {
4730 int retval;
4731
4732 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4733
4734 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4735 pid, retval);
4736
4737 return retval;
4738 }
4739
4740 static int
4741 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4742 {
4743 int retval;
4744
4745 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4746
4747 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4748 pid, retval);
4749
4750 return retval;
4751 }
4752
4753 static int
4754 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4755 {
4756 int retval;
4757
4758 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4759
4760 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4761 pid, retval);
4762
4763 return retval;
4764 }
4765
4766 static int
4767 debug_to_has_exited (struct target_ops *self,
4768 int pid, int wait_status, int *exit_status)
4769 {
4770 int has_exited;
4771
4772 has_exited = debug_target.to_has_exited (&debug_target,
4773 pid, wait_status, exit_status);
4774
4775 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4776 pid, wait_status, *exit_status, has_exited);
4777
4778 return has_exited;
4779 }
4780
4781 static int
4782 debug_to_can_run (struct target_ops *self)
4783 {
4784 int retval;
4785
4786 retval = debug_target.to_can_run (&debug_target);
4787
4788 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4789
4790 return retval;
4791 }
4792
4793 static struct gdbarch *
4794 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4795 {
4796 struct gdbarch *retval;
4797
4798 retval = debug_target.to_thread_architecture (ops, ptid);
4799
4800 fprintf_unfiltered (gdb_stdlog,
4801 "target_thread_architecture (%s) = %s [%s]\n",
4802 target_pid_to_str (ptid),
4803 host_address_to_string (retval),
4804 gdbarch_bfd_arch_info (retval)->printable_name);
4805 return retval;
4806 }
4807
4808 static void
4809 debug_to_stop (struct target_ops *self, ptid_t ptid)
4810 {
4811 debug_target.to_stop (&debug_target, ptid);
4812
4813 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4814 target_pid_to_str (ptid));
4815 }
4816
4817 static void
4818 debug_to_rcmd (struct target_ops *self, char *command,
4819 struct ui_file *outbuf)
4820 {
4821 debug_target.to_rcmd (&debug_target, command, outbuf);
4822 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4823 }
4824
4825 static char *
4826 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4827 {
4828 char *exec_file;
4829
4830 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4831
4832 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4833 pid, exec_file);
4834
4835 return exec_file;
4836 }
4837
/* Install the logging wrappers: snapshot the real CURRENT_TARGET into
   DEBUG_TARGET, then replace selected CURRENT_TARGET methods with the
   debug_to_* shims above.  Each shim forwards to DEBUG_TARGET (the
   pristine copy) and prints a trace line to gdb_stdlog.  */

static void
setup_target_debug (void)
{
  /* The copy must happen first so the wrappers can reach the real
     implementations through DEBUG_TARGET.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4885 \f
4886
/* Help text shared by the "info target" and "info files" commands.  */
static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4891
/* Default implementation of to_rcmd: monitor commands are only
   meaningful for targets that supply their own implementation, so
   report an error.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4897
/* Implement the "monitor" command: forward CMD verbatim to the target
   via target_rcmd, sending output to gdb_stdtarg.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
4904
/* Print the name of each layers of our target stack.  Implements the
   "maint print target-stack" command: walks TARGET_STACK from the top
   down via the beneath links and prints one line per stratum.  */

static void
maintenance_print_target_stack (char *cmd, int from_tty)
{
  struct target_ops *t;

  printf_filtered (_("The current target stack is:\n"));

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
    }
}
4919
/* Controls if async mode is permitted.  This is the live value
   consulted by the rest of GDB.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated
   (see set_target_async_command below).  */
static int target_async_permitted_1 = 0;
4926
/* "set target-async" hook.  Refuse to flip the mode while an inferior
   is live, rolling the staged value back so "show target-async" keeps
   reporting the setting actually in effect; otherwise commit the
   staged value.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the user's edit of the staged copy before erroring out.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
4939
/* "show target-async" hook: report the current setting.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4949
/* Temporary copies of permission settings.  The "set may-*" commands
   edit these staged values; set_target_permissions /
   set_write_memory_permission copy them into the live flags, and
   update_target_permissions copies the live flags back here.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4958
/* Make the user-set values match the real values again.  Called when
   a permission change is rejected (e.g. while the inferior runs) so
   that subsequent "show" commands report the values actually in
   effect.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4971
/* The one function handles (most of) the permission flags in the same
   way.  may_write_memory is the exception: it is handled by
   set_write_memory_permission below, so it is deliberately not copied
   here.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Roll the staged values back so "show" output stays accurate,
	 then refuse the change.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
4993
/* Set memory write permission independently of observer mode.  Unlike
   the flags handled by set_target_permissions, this one may be
   changed even while the inferior is running.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
5004
5005
/* Module initialization: push the dummy target as the bottom of the
   target stack and register this file's commands and set/show
   variables.  */

void
initialize_targets (void)
{
  /* The dummy target is always present at the bottom of the stack.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are aliases for the same report.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose.  Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The may-* permission commands all stage their values in the *_1
     variables; the set hooks copy them into the live flags.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}