convert to_set_permissions
[binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
59
60 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
61 long lwp, long tid);
62
63 static void tcomplain (void) ATTRIBUTE_NORETURN;
64
65 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
66
67 static int return_zero (void);
68
69 static void *return_null (void);
70
71 void target_ignore (void);
72
73 static void target_command (char *, int);
74
75 static struct target_ops *find_default_run_target (char *);
76
77 static target_xfer_partial_ftype default_xfer_partial;
78
79 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
80 ptid_t ptid);
81
82 static int dummy_find_memory_regions (struct target_ops *self,
83 find_memory_region_ftype ignore1,
84 void *ignore2);
85
86 static char *dummy_make_corefile_notes (struct target_ops *self,
87 bfd *ignore1, int *ignore2);
88
89 static int find_default_can_async_p (struct target_ops *ignore);
90
91 static int find_default_is_async_p (struct target_ops *ignore);
92
93 static enum exec_direction_kind default_execution_direction
94 (struct target_ops *self);
95
96 #include "target-delegates.c"
97
98 static void init_dummy_target (void);
99
100 static struct target_ops debug_target;
101
102 static void debug_to_open (char *, int);
103
104 static void debug_to_prepare_to_store (struct target_ops *self,
105 struct regcache *);
106
107 static void debug_to_files_info (struct target_ops *);
108
109 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
110 struct bp_target_info *);
111
112 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
113 struct bp_target_info *);
114
115 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
116 int, int, int);
117
118 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
119 struct gdbarch *,
120 struct bp_target_info *);
121
122 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
123 struct gdbarch *,
124 struct bp_target_info *);
125
126 static int debug_to_insert_watchpoint (struct target_ops *self,
127 CORE_ADDR, int, int,
128 struct expression *);
129
130 static int debug_to_remove_watchpoint (struct target_ops *self,
131 CORE_ADDR, int, int,
132 struct expression *);
133
134 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
135
136 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
137 CORE_ADDR, CORE_ADDR, int);
138
139 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
140 CORE_ADDR, int);
141
142 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
143 CORE_ADDR, int, int,
144 struct expression *);
145
146 static void debug_to_terminal_init (struct target_ops *self);
147
148 static void debug_to_terminal_inferior (struct target_ops *self);
149
150 static void debug_to_terminal_ours_for_output (struct target_ops *self);
151
152 static void debug_to_terminal_save_ours (struct target_ops *self);
153
154 static void debug_to_terminal_ours (struct target_ops *self);
155
156 static void debug_to_load (struct target_ops *self, char *, int);
157
158 static int debug_to_can_run (struct target_ops *self);
159
160 static void debug_to_stop (struct target_ops *self, ptid_t);
161
/* Pointer to array of target architecture structures; the size of the
   array; the current index into the array; the allocated size of the
   array.  */
struct target_ops **target_structs;
unsigned target_struct_size;
unsigned target_struct_allocsize;
/* Initial number of slots in TARGET_STRUCTS; the array doubles in
   add_target_with_completer when it fills up.  */
#define DEFAULT_ALLOCSIZE 10

/* The initial current target, so that there is always a semi-valid
   current target.  */

static struct target_ops dummy_target;

/* Top of target stack.  */

static struct target_ops *target_stack;

/* The target structure we are currently using to talk to a process
   or file or whatever "inferior" we have.  */

struct target_ops current_target;

/* Command list for target.  */

static struct cmd_list_element *targetlist = NULL;

/* Nonzero if we should trust readonly sections from the
   executable when reading memory.  */

static int trust_readonly = 0;

/* Nonzero if we should show true memory content including
   memory breakpoint inserted by gdb.  */

static int show_memory_breakpoints = 0;

/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertent disruption, such as in non-stop mode.  */

int may_write_registers = 1;

int may_write_memory = 1;

int may_insert_breakpoints = 1;

int may_insert_tracepoints = 1;

int may_insert_fast_tracepoints = 1;

int may_stop = 1;

/* Non-zero if we want to see trace of target level stuff.  */

static unsigned int targetdebug = 0;
/* Implement the "show debug target" command.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
223
224 static void setup_target_debug (void);
225
/* The user just typed 'target' without the name of a target.  Print a
   usage hint instead of doing anything.  */

static void
target_command (char *arg, int from_tty)
{
  fputs_filtered ("Argument required (target name). Try `help target'\n",
		  gdb_stdout);
}
234
235 /* Default target_has_* methods for process_stratum targets. */
236
237 int
238 default_child_has_all_memory (struct target_ops *ops)
239 {
240 /* If no inferior selected, then we can't read memory here. */
241 if (ptid_equal (inferior_ptid, null_ptid))
242 return 0;
243
244 return 1;
245 }
246
247 int
248 default_child_has_memory (struct target_ops *ops)
249 {
250 /* If no inferior selected, then we can't read memory here. */
251 if (ptid_equal (inferior_ptid, null_ptid))
252 return 0;
253
254 return 1;
255 }
256
257 int
258 default_child_has_stack (struct target_ops *ops)
259 {
260 /* If no inferior selected, there's no stack. */
261 if (ptid_equal (inferior_ptid, null_ptid))
262 return 0;
263
264 return 1;
265 }
266
267 int
268 default_child_has_registers (struct target_ops *ops)
269 {
270 /* Can't read registers from no inferior. */
271 if (ptid_equal (inferior_ptid, null_ptid))
272 return 0;
273
274 return 1;
275 }
276
277 int
278 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
279 {
280 /* If there's no thread selected, then we can't make it run through
281 hoops. */
282 if (ptid_equal (the_ptid, null_ptid))
283 return 0;
284
285 return 1;
286 }
287
288
289 int
290 target_has_all_memory_1 (void)
291 {
292 struct target_ops *t;
293
294 for (t = current_target.beneath; t != NULL; t = t->beneath)
295 if (t->to_has_all_memory (t))
296 return 1;
297
298 return 0;
299 }
300
301 int
302 target_has_memory_1 (void)
303 {
304 struct target_ops *t;
305
306 for (t = current_target.beneath; t != NULL; t = t->beneath)
307 if (t->to_has_memory (t))
308 return 1;
309
310 return 0;
311 }
312
313 int
314 target_has_stack_1 (void)
315 {
316 struct target_ops *t;
317
318 for (t = current_target.beneath; t != NULL; t = t->beneath)
319 if (t->to_has_stack (t))
320 return 1;
321
322 return 0;
323 }
324
325 int
326 target_has_registers_1 (void)
327 {
328 struct target_ops *t;
329
330 for (t = current_target.beneath; t != NULL; t = t->beneath)
331 if (t->to_has_registers (t))
332 return 1;
333
334 return 0;
335 }
336
337 int
338 target_has_execution_1 (ptid_t the_ptid)
339 {
340 struct target_ops *t;
341
342 for (t = current_target.beneath; t != NULL; t = t->beneath)
343 if (t->to_has_execution (t, the_ptid))
344 return 1;
345
346 return 0;
347 }
348
/* Like target_has_execution_1, but for the currently selected
   inferior's ptid.  */

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
354
/* Complete initialization of T.  This ensures that various fields in
   T are set, if needed by the target implementation.  */

void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* NOTE(review): the to_has_* defaults are installed by casting
     return_zero to each slot's exact signature.  Calling through a
     mismatched function-pointer type is formally undefined behavior;
     presumably benign on the supported hosts -- confirm.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill every remaining unset method with its delegator
     (generated in target-delegates.c).  */
  install_delegators (t);
}
382
/* Add possible target architecture T to the list and add a new
   command 'target T->to_shortname'.  Set COMPLETER as the command's
   completer if not NULL.  */

void
add_target_with_completer (struct target_ops *t,
			   completer_ftype *completer)
{
  struct cmd_list_element *c;

  complete_target_initialization (t);

  /* Grow the registry of known targets, doubling whenever it is
     full.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* Lazily create the "target" prefix command the first time any
     target is registered.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol. For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
	       &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
423
/* Add a possible target architecture to the list, with no command
   completer.  */

void
add_target (struct target_ops *t)
{
  add_target_with_completer (t, NULL);
}
431
/* See target.h.  Register ALIAS as a deprecated command name for
   target T; using it warns and points at "target T->to_shortname".  */

void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  alt = xstrprintf ("target %s", t->to_shortname);
  /* NOTE(review): ALT is not freed here; presumably deprecate_cmd
     retains the string for later warning output -- confirm.  */
  deprecate_cmd (c, alt);
}
446
447 /* Stub functions */
448
/* Target method stub that deliberately does nothing.  Used as a
   default for optional operations.  */

void
target_ignore (void)
{
}
453
454 void
455 target_kill (void)
456 {
457 struct target_ops *t;
458
459 for (t = current_target.beneath; t != NULL; t = t->beneath)
460 if (t->to_kill != NULL)
461 {
462 if (targetdebug)
463 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
464
465 t->to_kill (t);
466 return;
467 }
468
469 noprocess ();
470 }
471
472 void
473 target_load (char *arg, int from_tty)
474 {
475 target_dcache_invalidate ();
476 (*current_target.to_load) (&current_target, arg, from_tty);
477 }
478
/* Start a new inferior running EXEC_FILE with arguments ARGS and
   environment ENV, delegating to the first target layer that
   implements to_create_inferior.  It is an internal error if no
   layer does.  */

void
target_create_inferior (char *exec_file, char *args,
			char **env, int from_tty)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_create_inferior != NULL)
	{
	  t->to_create_inferior (t, exec_file, args, env, from_tty);
	  /* The debug trace is emitted after the call, so it will not
	     appear if to_create_inferior throws.  */
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_create_inferior (%s, %s, xxx, %d)\n",
				exec_file, args, from_tty);
	  return;
	}
    }

  internal_error (__FILE__, __LINE__,
		  _("could not find a target to create inferior"));
}
501
/* Give the inferior ownership of the terminal, unless GDB is keeping
   it for a background resume.  */

void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) (&current_target);
}
516
517 static int
518 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
519 struct target_ops *t)
520 {
521 errno = EIO; /* Can't read/write this location. */
522 return 0; /* No bytes handled. */
523 }
524
/* Throw an error complaining that the current target cannot perform
   the requested operation.  Does not return.  */

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
531
/* Throw an error complaining that there is no process to debug.
   Does not return.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
537
/* Default implementation of to_terminal_info: report that nothing
   has been saved.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
543
/* A default implementation for the to_get_ada_task_ptid target method.

   This function builds the PTID by using both LWP and TID as part of
   the PTID lwp and tid elements.  The pid used is the pid of the
   inferior_ptid.  */

static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
555
556 static enum exec_direction_kind
557 default_execution_direction (struct target_ops *self)
558 {
559 if (!target_can_execute_reverse)
560 return EXEC_FORWARD;
561 else if (!target_can_async_p ())
562 return EXEC_FORWARD;
563 else
564 gdb_assert_not_reached ("\
565 to_execution_direction must be implemented for reverse async");
566 }
567
/* Go through the target stack from top to bottom, copying over zero
   entries in current_target, then filling in still empty entries.  In
   effect, we are doing class inheritance through the pushed target
   vectors.

   NOTE: cagney/2003-10-17: The problem with this inheritance, as it
   is currently implemented, is that it discards any knowledge of
   which target an inherited method originally belonged to.
   Consequently, new target methods should instead explicitly and
   locally search the target stack for the target that can handle the
   request.  */

static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy FIELD from TARGET into current_target, but only if a
     higher-stratum target has not already supplied a value.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      /* Do not inherit to_files_info.  */
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      /* Do not inherit to_can_use_hw_breakpoint.  */
      /* Do not inherit to_insert_hw_breakpoint.  */
      /* Do not inherit to_remove_hw_breakpoint.  */
      /* Do not inherit to_ranged_break_num_registers.  */
      /* Do not inherit to_insert_watchpoint.  */
      /* Do not inherit to_remove_watchpoint.  */
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      /* Do not inherit to_watchpoint_addr_within_range.  */
      /* Do not inherit to_region_ok_for_hw_watchpoint.  */
      /* Do not inherit to_can_accel_watchpoint_condition.  */
      /* Do not inherit to_masked_watch_num_registers.  */
      /* Do not inherit to_terminal_init.  */
      /* Do not inherit to_terminal_inferior.  */
      /* Do not inherit to_terminal_ours_for_output.  */
      /* Do not inherit to_terminal_ours.  */
      /* Do not inherit to_terminal_save_ours.  */
      /* Do not inherit to_terminal_info.  */
      /* Do not inherit to_kill.  */
      /* Do not inherit to_load.  */
      /* Do not inherit to_create_inferior.  */
      /* Do not inherit to_post_startup_inferior.  */
      /* Do not inherit to_insert_fork_catchpoint.  */
      /* Do not inherit to_remove_fork_catchpoint.  */
      /* Do not inherit to_insert_vfork_catchpoint.  */
      /* Do not inherit to_remove_vfork_catchpoint.  */
      /* Do not inherit to_follow_fork.  */
      /* Do not inherit to_insert_exec_catchpoint.  */
      /* Do not inherit to_remove_exec_catchpoint.  */
      /* Do not inherit to_set_syscall_catchpoint.  */
      /* Do not inherit to_has_exited.  */
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      /* Do not inherit to_extra_thread_info.  */
      /* Do not inherit to_thread_name.  */
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      /* Do not inherit to_pid_to_exec_file.  */
      /* Do not inherit to_log_command.  */
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      /* Do not inherit to_find_memory_regions.  */
      /* Do not inherit to_make_corefile_notes.  */
      /* Do not inherit to_get_bookmark.  */
      /* Do not inherit to_goto_bookmark.  */
      /* Do not inherit to_get_thread_local_address.  */
      /* Do not inherit to_can_execute_reverse.  */
      /* Do not inherit to_execution_direction.  */
      /* Do not inherit to_thread_architecture.  */
      /* Do not inherit to_read_description.  */
      /* Do not inherit to_get_ada_task_ptid.  */
      /* Do not inherit to_search_memory.  */
      /* Do not inherit to_supports_multi_process.  */
      /* Do not inherit to_supports_enable_disable_tracepoint.  */
      /* Do not inherit to_supports_string_tracing.  */
      /* Do not inherit to_trace_init.  */
      /* Do not inherit to_download_tracepoint.  */
      /* Do not inherit to_can_download_tracepoint.  */
      /* Do not inherit to_download_trace_state_variable.  */
      /* Do not inherit to_enable_tracepoint.  */
      /* Do not inherit to_disable_tracepoint.  */
      /* Do not inherit to_trace_set_readonly_regions.  */
      /* Do not inherit to_trace_start.  */
      /* Do not inherit to_get_trace_status.  */
      /* Do not inherit to_get_tracepoint_status.  */
      /* Do not inherit to_trace_stop.  */
      /* Do not inherit to_trace_find.  */
      /* Do not inherit to_get_trace_state_variable_value.  */
      /* Do not inherit to_save_trace_data.  */
      /* Do not inherit to_upload_tracepoints.  */
      /* Do not inherit to_upload_trace_state_variables.  */
      /* Do not inherit to_get_raw_trace_data.  */
      /* Do not inherit to_get_min_fast_tracepoint_insn_len.  */
      /* Do not inherit to_set_disconnected_tracing.  */
      /* Do not inherit to_set_circular_trace_buffer.  */
      /* Do not inherit to_set_trace_buffer_size.  */
      /* Do not inherit to_set_trace_notes.  */
      /* Do not inherit to_get_tib_address.  */
      /* Do not inherit to_set_permissions.  */
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
    if (!current_target.field)	       \
      current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_stop,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  /* to_read_description is explicitly cleared: a NULL value means
     "no target description", not an error.  */
  current_target.to_read_description = NULL;
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (struct target_ops *,
		      CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (struct target_ops *,
						    const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (struct target_ops *))
	    return_null);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (struct target_ops *, int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (struct target_ops *))
	    return_zero);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
790
/* Push a new target type into the stack of the existing target accessors,
   possibly superseding some of the existing accessors.

   Rather than allow an empty stack, we always have the dummy target at
   the bottom stratum, so we can call the function vectors without
   checking them.  */

void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  CUR ends up
     pointing at the link where T must be spliced; strata decrease
     going down the stack.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
	 and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Rebuild current_target to reflect the changed stack.  */
  update_current_target ();
}
841
/* Remove a target_ops vector from the stack, wherever it may be.
   Return how many times it was removed (0 or 1).  */

int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target is the permanent bottom of the stack.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
883
884 void
885 pop_all_targets_above (enum strata above_stratum)
886 {
887 while ((int) (current_target.to_stratum) > (int) above_stratum)
888 {
889 if (!unpush_target (target_stack))
890 {
891 fprintf_unfiltered (gdb_stderr,
892 "pop_all_targets couldn't find target %s\n",
893 target_stack->to_shortname);
894 internal_error (__FILE__, __LINE__,
895 _("failed internal consistency check"));
896 break;
897 }
898 }
899 }
900
/* Unpush every target down to (but not including) the dummy
   target.  */

void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
906
907 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
908
909 int
910 target_is_pushed (struct target_ops *t)
911 {
912 struct target_ops **cur;
913
914 /* Check magic number. If wrong, it probably means someone changed
915 the struct definition, but not all the places that initialize one. */
916 if (t->to_magic != OPS_MAGIC)
917 {
918 fprintf_unfiltered (gdb_stderr,
919 "Magic number of %s target struct wrong\n",
920 t->to_shortname);
921 internal_error (__FILE__, __LINE__,
922 _("failed internal consistency check"));
923 }
924
925 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
926 if (*cur == t)
927 return 1;
928
929 return 0;
930 }
931
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  Throws
   a user error if TLS cannot be resolved on this target.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* ADDR is volatile because it is assigned inside TRY_CATCH and
     read after the catch, across a possible longjmp.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target layer that can resolve TLS addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1028
1029 const char *
1030 target_xfer_status_to_string (enum target_xfer_status err)
1031 {
1032 #define CASE(X) case X: return #X
1033 switch (err)
1034 {
1035 CASE(TARGET_XFER_E_IO);
1036 CASE(TARGET_XFER_E_UNAVAILABLE);
1037 default:
1038 return "<unknown>";
1039 }
1040 #undef CASE
1041 };
1042
1043
/* Local minimum macro, shadowing any prior definition.  NOTE: both
   arguments may be evaluated more than once, so avoid side effects.  */
#undef MIN
#define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1046
1047 /* target_read_string -- read a null terminated string, up to LEN bytes,
1048 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1049 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1050 is responsible for freeing it. Return the number of bytes successfully
1051 read. */
1052
int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  The buffer doubles on demand below, so the
     initial size only affects how soon the first xrealloc happens.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read a whole aligned 4-byte word and pick out the bytes we
	 need; TLEN is how many of them belong to this request.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the result buffer (doubling) if the next TLEN bytes
	 would not fit.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes into the result, stopping at (and including) a
	 terminating NUL.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
done:
  /* Note: if LEN was exhausted or a read error occurred before a NUL
     was seen, the returned buffer is NOT NUL-terminated; callers must
     rely on the returned count.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1119
1120 struct target_section_table *
1121 target_get_section_table (struct target_ops *target)
1122 {
1123 struct target_ops *t;
1124
1125 if (targetdebug)
1126 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1127
1128 for (t = target; t != NULL; t = t->beneath)
1129 if (t->to_get_section_table != NULL)
1130 return (*t->to_get_section_table) (t);
1131
1132 return NULL;
1133 }
1134
1135 /* Find a section containing ADDR. */
1136
1137 struct target_section *
1138 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1139 {
1140 struct target_section_table *table = target_get_section_table (target);
1141 struct target_section *secp;
1142
1143 if (table == NULL)
1144 return NULL;
1145
1146 for (secp = table->sections; secp < table->sections_end; secp++)
1147 {
1148 if (addr >= secp->addr && addr < secp->endaddr)
1149 return secp;
1150 }
1151 return NULL;
1152 }
1153
1154 /* Read memory from the live target, even if currently inspecting a
1155 traceframe. The return is the same as that of target_read. */
1156
1157 static enum target_xfer_status
1158 target_read_live_memory (enum target_object object,
1159 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1160 ULONGEST *xfered_len)
1161 {
1162 enum target_xfer_status ret;
1163 struct cleanup *cleanup;
1164
1165 /* Switch momentarily out of tfind mode so to access live memory.
1166 Note that this must not clear global state, such as the frame
1167 cache, which must still remain valid for the previous traceframe.
1168 We may be _building_ the frame cache at this point. */
1169 cleanup = make_cleanup_restore_traceframe_number ();
1170 set_traceframe_number (-1);
1171
1172 ret = target_xfer_partial (current_target.beneath, object, NULL,
1173 myaddr, NULL, memaddr, len, xfered_len);
1174
1175 do_cleanups (cleanup);
1176 return ret;
1177 }
1178
1179 /* Using the set of read-only target sections of OPS, read live
1180 read-only memory. Note that the actual reads start from the
1181 top-most target again.
1182
1183 For interface/parameters/return description see target.h,
1184 to_xfer_partial. */
1185
static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed if MEMADDR falls in a read-only section: for those,
     the live contents are assumed to match what existed at trace
     time.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      /* Find how much of [MEMADDR, MEMEND) lies inside a single
	 section, and read at most that much.  */
      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  /* Not in a read-only section (or no section at all): report EOF so
     the caller falls back to other methods.  */
  return TARGET_XFER_EOF;
}
1234
1235 /* Read memory from more than one valid target. A core file, for
1236 instance, could have some of memory but delegate other bits to
1237 the target below it. So, we must manually try all targets. */
1238
1239 static enum target_xfer_status
1240 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1241 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1242 ULONGEST *xfered_len)
1243 {
1244 enum target_xfer_status res;
1245
1246 do
1247 {
1248 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1249 readbuf, writebuf, memaddr, len,
1250 xfered_len);
1251 if (res == TARGET_XFER_OK)
1252 break;
1253
1254 /* Stop if the target reports that the memory is not available. */
1255 if (res == TARGET_XFER_E_UNAVAILABLE)
1256 break;
1257
1258 /* We want to continue past core files to executables, but not
1259 past a running target's memory. */
1260 if (ops->to_has_all_memory (ops))
1261 break;
1262
1263 ops = ops->beneath;
1264 }
1265 while (ops != NULL);
1266
1267 return res;
1268 }
1269
1270 /* Perform a partial memory transfer.
1271 For docs see target.h, to_xfer_partial. */
1272
static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  /* If the traceframe has nothing at MEMADDR itself, redirect
	     to live read-only memory (possibly after clamping LEN so
	     we stop before the first available range).  */
	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Honor the access mode of the memory region containing MEMADDR.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1480
1481 /* Perform a partial memory transfer. For docs see target.h,
1482 to_xfer_partial. */
1483
1484 static enum target_xfer_status
1485 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1486 gdb_byte *readbuf, const gdb_byte *writebuf,
1487 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1488 {
1489 enum target_xfer_status res;
1490
1491 /* Zero length requests are ok and require no work. */
1492 if (len == 0)
1493 return TARGET_XFER_EOF;
1494
1495 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1496 breakpoint insns, thus hiding out from higher layers whether
1497 there are software breakpoints inserted in the code stream. */
1498 if (readbuf != NULL)
1499 {
1500 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1501 xfered_len);
1502
1503 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1504 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1505 }
1506 else
1507 {
1508 void *buf;
1509 struct cleanup *old_chain;
1510
1511 /* A large write request is likely to be partially satisfied
1512 by memory_xfer_partial_1. We will continually malloc
1513 and free a copy of the entire write request for breakpoint
1514 shadow handling even though we only end up writing a small
1515 subset of it. Cap writes to 4KB to mitigate this. */
1516 len = min (4096, len);
1517
1518 buf = xmalloc (len);
1519 old_chain = make_cleanup (xfree, buf);
1520 memcpy (buf, writebuf, len);
1521
1522 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1523 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1524 xfered_len);
1525
1526 do_cleanups (old_chain);
1527 }
1528
1529 return res;
1530 }
1531
1532 static void
1533 restore_show_memory_breakpoints (void *arg)
1534 {
1535 show_memory_breakpoints = (uintptr_t) arg;
1536 }
1537
1538 struct cleanup *
1539 make_show_memory_breakpoints_cleanup (int show)
1540 {
1541 int current = show_memory_breakpoints;
1542
1543 show_memory_breakpoints = show;
1544 return make_cleanup (restore_show_memory_breakpoints,
1545 (void *) (uintptr_t) current);
1546 }
1547
1548 /* For docs see target.h, to_xfer_partial. */
1549
enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Respect the global "may-write-memory" setting.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      /* Start a new output line every 16 bytes (address of
		 the byte is 16-aligned); with "set debug target"
		 below 2, elide everything after the first line.  */
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1640
1641 /* Read LEN bytes of target memory at address MEMADDR, placing the
1642 results in GDB's memory at MYADDR. Returns either 0 for success or
1643 TARGET_XFER_E_IO if any error occurs.
1644
1645 If an error occurs, no guarantee is made about the contents of the data at
1646 MYADDR. In particular, the caller should not depend upon partial reads
1647 filling the buffer with good data. There is no way for the caller to know
1648 how much good data might have been transfered anyway. Callers that can
1649 deal with partial reads should call target_read (which will retry until
1650 it makes no progress, and then return how much was transferred). */
1651
1652 int
1653 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1654 {
1655 /* Dispatch to the topmost target, not the flattened current_target.
1656 Memory accesses check target->to_has_(all_)memory, and the
1657 flattened target doesn't inherit those. */
1658 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1659 myaddr, memaddr, len) == len)
1660 return 0;
1661 else
1662 return TARGET_XFER_E_IO;
1663 }
1664
1665 /* Like target_read_memory, but specify explicitly that this is a read
1666 from the target's raw memory. That is, this read bypasses the
1667 dcache, breakpoint shadowing, etc. */
1668
1669 int
1670 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1671 {
1672 /* See comment in target_read_memory about why the request starts at
1673 current_target.beneath. */
1674 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1675 myaddr, memaddr, len) == len)
1676 return 0;
1677 else
1678 return TARGET_XFER_E_IO;
1679 }
1680
1681 /* Like target_read_memory, but specify explicitly that this is a read from
1682 the target's stack. This may trigger different cache behavior. */
1683
1684 int
1685 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1686 {
1687 /* See comment in target_read_memory about why the request starts at
1688 current_target.beneath. */
1689 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1690 myaddr, memaddr, len) == len)
1691 return 0;
1692 else
1693 return TARGET_XFER_E_IO;
1694 }
1695
1696 /* Like target_read_memory, but specify explicitly that this is a read from
1697 the target's code. This may trigger different cache behavior. */
1698
1699 int
1700 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1701 {
1702 /* See comment in target_read_memory about why the request starts at
1703 current_target.beneath. */
1704 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1705 myaddr, memaddr, len) == len)
1706 return 0;
1707 else
1708 return TARGET_XFER_E_IO;
1709 }
1710
1711 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1712 Returns either 0 for success or TARGET_XFER_E_IO if any
1713 error occurs. If an error occurs, no guarantee is made about how
1714 much data got written. Callers that can deal with partial writes
1715 should call target_write. */
1716
1717 int
1718 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1719 {
1720 /* See comment in target_read_memory about why the request starts at
1721 current_target.beneath. */
1722 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1723 myaddr, memaddr, len) == len)
1724 return 0;
1725 else
1726 return TARGET_XFER_E_IO;
1727 }
1728
1729 /* Write LEN bytes from MYADDR to target raw memory at address
1730 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1731 if any error occurs. If an error occurs, no guarantee is made
1732 about how much data got written. Callers that can deal with
1733 partial writes should call target_write. */
1734
1735 int
1736 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1737 {
1738 /* See comment in target_read_memory about why the request starts at
1739 current_target.beneath. */
1740 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1741 myaddr, memaddr, len) == len)
1742 return 0;
1743 else
1744 return TARGET_XFER_E_IO;
1745 }
1746
1747 /* Fetch the target's memory map. */
1748
1749 VEC(mem_region_s) *
1750 target_memory_map (void)
1751 {
1752 VEC(mem_region_s) *result;
1753 struct mem_region *last_one, *this_one;
1754 int ix;
1755 struct target_ops *t;
1756
1757 if (targetdebug)
1758 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1759
1760 for (t = current_target.beneath; t != NULL; t = t->beneath)
1761 if (t->to_memory_map != NULL)
1762 break;
1763
1764 if (t == NULL)
1765 return NULL;
1766
1767 result = t->to_memory_map (t);
1768 if (result == NULL)
1769 return NULL;
1770
1771 qsort (VEC_address (mem_region_s, result),
1772 VEC_length (mem_region_s, result),
1773 sizeof (struct mem_region), mem_region_cmp);
1774
1775 /* Check that regions do not overlap. Simultaneously assign
1776 a numbering for the "mem" commands to use to refer to
1777 each region. */
1778 last_one = NULL;
1779 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1780 {
1781 this_one->number = ix;
1782
1783 if (last_one && last_one->hi > this_one->lo)
1784 {
1785 warning (_("Overlapping regions in memory map: ignoring"));
1786 VEC_free (mem_region_s, result);
1787 return NULL;
1788 }
1789 last_one = this_one;
1790 }
1791
1792 return result;
1793 }
1794
1795 void
1796 target_flash_erase (ULONGEST address, LONGEST length)
1797 {
1798 struct target_ops *t;
1799
1800 for (t = current_target.beneath; t != NULL; t = t->beneath)
1801 if (t->to_flash_erase != NULL)
1802 {
1803 if (targetdebug)
1804 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1805 hex_string (address), phex (length, 0));
1806 t->to_flash_erase (t, address, length);
1807 return;
1808 }
1809
1810 tcomplain ();
1811 }
1812
1813 void
1814 target_flash_done (void)
1815 {
1816 struct target_ops *t;
1817
1818 for (t = current_target.beneath; t != NULL; t = t->beneath)
1819 if (t->to_flash_done != NULL)
1820 {
1821 if (targetdebug)
1822 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1823 t->to_flash_done (t);
1824 return;
1825 }
1826
1827 tcomplain ();
1828 }
1829
/* "show trust-readonly-sections" callback: report the current VALUE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  const char *fmt = _("Mode for reading from readonly sections is %s.\n");

  fprintf_filtered (file, fmt, value);
}
1838
1839 /* More generic transfers. */
1840
1841 static enum target_xfer_status
1842 default_xfer_partial (struct target_ops *ops, enum target_object object,
1843 const char *annex, gdb_byte *readbuf,
1844 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1845 ULONGEST *xfered_len)
1846 {
1847 if (object == TARGET_OBJECT_MEMORY
1848 && ops->deprecated_xfer_memory != NULL)
1849 /* If available, fall back to the target's
1850 "deprecated_xfer_memory" method. */
1851 {
1852 int xfered = -1;
1853
1854 errno = 0;
1855 if (writebuf != NULL)
1856 {
1857 void *buffer = xmalloc (len);
1858 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1859
1860 memcpy (buffer, writebuf, len);
1861 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1862 1/*write*/, NULL, ops);
1863 do_cleanups (cleanup);
1864 }
1865 if (readbuf != NULL)
1866 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1867 0/*read*/, NULL, ops);
1868 if (xfered > 0)
1869 {
1870 *xfered_len = (ULONGEST) xfered;
1871 return TARGET_XFER_E_IO;
1872 }
1873 else if (xfered == 0 && errno == 0)
1874 /* "deprecated_xfer_memory" uses 0, cross checked against
1875 ERRNO as one indication of an error. */
1876 return TARGET_XFER_EOF;
1877 else
1878 return TARGET_XFER_E_IO;
1879 }
1880 else
1881 {
1882 gdb_assert (ops->beneath != NULL);
1883 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1884 readbuf, writebuf, offset, len,
1885 xfered_len);
1886 }
1887 }
1888
1889 /* Target vector read/write partial wrapper functions. */
1890
1891 static enum target_xfer_status
1892 target_read_partial (struct target_ops *ops,
1893 enum target_object object,
1894 const char *annex, gdb_byte *buf,
1895 ULONGEST offset, ULONGEST len,
1896 ULONGEST *xfered_len)
1897 {
1898 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1899 xfered_len);
1900 }
1901
1902 static enum target_xfer_status
1903 target_write_partial (struct target_ops *ops,
1904 enum target_object object,
1905 const char *annex, const gdb_byte *buf,
1906 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1907 {
1908 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1909 xfered_len);
1910 }
1911
1912 /* Wrappers to perform the full transfer. */
1913
1914 /* For docs on target_read see target.h. */
1915
1916 LONGEST
1917 target_read (struct target_ops *ops,
1918 enum target_object object,
1919 const char *annex, gdb_byte *buf,
1920 ULONGEST offset, LONGEST len)
1921 {
1922 LONGEST xfered = 0;
1923
1924 while (xfered < len)
1925 {
1926 ULONGEST xfered_len;
1927 enum target_xfer_status status;
1928
1929 status = target_read_partial (ops, object, annex,
1930 (gdb_byte *) buf + xfered,
1931 offset + xfered, len - xfered,
1932 &xfered_len);
1933
1934 /* Call an observer, notifying them of the xfer progress? */
1935 if (status == TARGET_XFER_EOF)
1936 return xfered;
1937 else if (status == TARGET_XFER_OK)
1938 {
1939 xfered += xfered_len;
1940 QUIT;
1941 }
1942 else
1943 return -1;
1944
1945 }
1946 return len;
1947 }
1948
1949 /* Assuming that the entire [begin, end) range of memory cannot be
1950 read, try to read whatever subrange is possible to read.
1951
1952 The function returns, in RESULT, either zero or one memory block.
1953 If there's a readable subrange at the beginning, it is completely
1954 read and returned. Any further readable subrange will not be read.
1955 Otherwise, if there's a readable subrange at the end, it will be
1956 completely read and returned. Any readable subranges before it
1957 (obviously, not starting at the beginning), will be ignored. In
1958 other cases -- either no readable subrange, or readable subrange(s)
1959 that is neither at the beginning, or end, nothing is returned.
1960
1961 The purpose of this function is to handle a read across a boundary
1962 of accessible memory in a case when memory map is not available.
1963 The above restrictions are fine for this case, but will give
1964 incorrect results if the memory is 'patchy'. However, supporting
1965 'patchy' memory would require trying to read every single byte,
1966 and it seems unacceptable solution. Explicit memory map is
1967 recommended for this case -- and target_read_memory_robust will
1968 take care of reading multiple ranges then. */
1969
static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      /* First byte readable: search forward for the end of the
	 readable prefix.  */
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      /* Last byte readable: search backward for the start of the
	 readable suffix.  */
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is the half adjacent to the part already known
	 readable -- the half we try to read next.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the
	     next iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  BUF itself
	 is handed to the result; ownership transfers to *RESULT.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy just that
	 tail out of BUF, then release BUF.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2083
2084 void
2085 free_memory_read_result_vector (void *x)
2086 {
2087 VEC(memory_read_result_s) *v = x;
2088 memory_read_result_s *current;
2089 int ix;
2090
2091 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2092 {
2093 xfree (current->data);
2094 }
2095 VEC_free (memory_read_result_s, v);
2096 }
2097
2098 VEC(memory_read_result_s) *
2099 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2100 {
2101 VEC(memory_read_result_s) *result = 0;
2102
2103 LONGEST xfered = 0;
2104 while (xfered < len)
2105 {
2106 struct mem_region *region = lookup_mem_region (offset + xfered);
2107 LONGEST rlen;
2108
2109 /* If there is no explicit region, a fake one should be created. */
2110 gdb_assert (region);
2111
2112 if (region->hi == 0)
2113 rlen = len - xfered;
2114 else
2115 rlen = region->hi - offset;
2116
2117 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2118 {
2119 /* Cannot read this region. Note that we can end up here only
2120 if the region is explicitly marked inaccessible, or
2121 'inaccessible-by-default' is in effect. */
2122 xfered += rlen;
2123 }
2124 else
2125 {
2126 LONGEST to_read = min (len - xfered, rlen);
2127 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2128
2129 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2130 (gdb_byte *) buffer,
2131 offset + xfered, to_read);
2132 /* Call an observer, notifying them of the xfer progress? */
2133 if (xfer <= 0)
2134 {
2135 /* Got an error reading full chunk. See if maybe we can read
2136 some subrange. */
2137 xfree (buffer);
2138 read_whatever_is_readable (ops, offset + xfered,
2139 offset + xfered + to_read, &result);
2140 xfered += to_read;
2141 }
2142 else
2143 {
2144 struct memory_read_result r;
2145 r.data = buffer;
2146 r.begin = offset + xfered;
2147 r.end = r.begin + xfer;
2148 VEC_safe_push (memory_read_result_s, result, &r);
2149 xfered += xfer;
2150 }
2151 QUIT;
2152 }
2153 }
2154 return result;
2155 }
2156
2157
2158 /* An alternative to target_write with progress callbacks. */
2159
2160 LONGEST
2161 target_write_with_progress (struct target_ops *ops,
2162 enum target_object object,
2163 const char *annex, const gdb_byte *buf,
2164 ULONGEST offset, LONGEST len,
2165 void (*progress) (ULONGEST, void *), void *baton)
2166 {
2167 LONGEST xfered = 0;
2168
2169 /* Give the progress callback a chance to set up. */
2170 if (progress)
2171 (*progress) (0, baton);
2172
2173 while (xfered < len)
2174 {
2175 ULONGEST xfered_len;
2176 enum target_xfer_status status;
2177
2178 status = target_write_partial (ops, object, annex,
2179 (gdb_byte *) buf + xfered,
2180 offset + xfered, len - xfered,
2181 &xfered_len);
2182
2183 if (status == TARGET_XFER_EOF)
2184 return xfered;
2185 if (TARGET_XFER_STATUS_ERROR_P (status))
2186 return -1;
2187
2188 gdb_assert (status == TARGET_XFER_OK);
2189 if (progress)
2190 (*progress) (xfered_len, baton);
2191
2192 xfered += xfered_len;
2193 QUIT;
2194 }
2195 return len;
2196 }
2197
2198 /* For docs on target_write see target.h. */
2199
2200 LONGEST
2201 target_write (struct target_ops *ops,
2202 enum target_object object,
2203 const char *annex, const gdb_byte *buf,
2204 ULONGEST offset, LONGEST len)
2205 {
2206 return target_write_with_progress (ops, object, annex, buf, offset, len,
2207 NULL, NULL);
2208 }
2209
2210 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2211 the size of the transferred data. PADDING additional bytes are
2212 available in *BUF_P. This is a helper function for
2213 target_read_alloc; see the declaration of that function for more
2214 information. */
2215
2216 static LONGEST
2217 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2218 const char *annex, gdb_byte **buf_p, int padding)
2219 {
2220 size_t buf_alloc, buf_pos;
2221 gdb_byte *buf;
2222
2223 /* This function does not have a length parameter; it reads the
2224 entire OBJECT). Also, it doesn't support objects fetched partly
2225 from one target and partly from another (in a different stratum,
2226 e.g. a core file and an executable). Both reasons make it
2227 unsuitable for reading memory. */
2228 gdb_assert (object != TARGET_OBJECT_MEMORY);
2229
2230 /* Start by reading up to 4K at a time. The target will throttle
2231 this number down if necessary. */
2232 buf_alloc = 4096;
2233 buf = xmalloc (buf_alloc);
2234 buf_pos = 0;
2235 while (1)
2236 {
2237 ULONGEST xfered_len;
2238 enum target_xfer_status status;
2239
2240 status = target_read_partial (ops, object, annex, &buf[buf_pos],
2241 buf_pos, buf_alloc - buf_pos - padding,
2242 &xfered_len);
2243
2244 if (status == TARGET_XFER_EOF)
2245 {
2246 /* Read all there was. */
2247 if (buf_pos == 0)
2248 xfree (buf);
2249 else
2250 *buf_p = buf;
2251 return buf_pos;
2252 }
2253 else if (status != TARGET_XFER_OK)
2254 {
2255 /* An error occurred. */
2256 xfree (buf);
2257 return TARGET_XFER_E_IO;
2258 }
2259
2260 buf_pos += xfered_len;
2261
2262 /* If the buffer is filling up, expand it. */
2263 if (buf_alloc < buf_pos * 2)
2264 {
2265 buf_alloc *= 2;
2266 buf = xrealloc (buf, buf_alloc);
2267 }
2268
2269 QUIT;
2270 }
2271 }
2272
2273 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2274 the size of the transferred data. See the declaration in "target.h"
2275 function for more information about the return value. */
2276
2277 LONGEST
2278 target_read_alloc (struct target_ops *ops, enum target_object object,
2279 const char *annex, gdb_byte **buf_p)
2280 {
2281 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2282 }
2283
2284 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2285 returned as a string, allocated using xmalloc. If an error occurs
2286 or the transfer is unsupported, NULL is returned. Empty objects
2287 are returned as allocated but empty strings. A warning is issued
2288 if the result contains any embedded NUL bytes. */
2289
2290 char *
2291 target_read_stralloc (struct target_ops *ops, enum target_object object,
2292 const char *annex)
2293 {
2294 gdb_byte *buffer;
2295 char *bufstr;
2296 LONGEST i, transferred;
2297
2298 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2299 bufstr = (char *) buffer;
2300
2301 if (transferred < 0)
2302 return NULL;
2303
2304 if (transferred == 0)
2305 return xstrdup ("");
2306
2307 bufstr[transferred] = 0;
2308
2309 /* Check for embedded NUL bytes; but allow trailing NULs. */
2310 for (i = strlen (bufstr); i < transferred; i++)
2311 if (bufstr[i] != 0)
2312 {
2313 warning (_("target object %d, annex %s, "
2314 "contained unexpected null characters"),
2315 (int) object, annex ? annex : "(none)");
2316 break;
2317 }
2318
2319 return bufstr;
2320 }
2321
2322 /* Memory transfer methods. */
2323
2324 void
2325 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2326 LONGEST len)
2327 {
2328 /* This method is used to read from an alternate, non-current
2329 target. This read must bypass the overlay support (as symbols
2330 don't match this target), and GDB's internal cache (wrong cache
2331 for this target). */
2332 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2333 != len)
2334 memory_error (TARGET_XFER_E_IO, addr);
2335 }
2336
2337 ULONGEST
2338 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2339 int len, enum bfd_endian byte_order)
2340 {
2341 gdb_byte buf[sizeof (ULONGEST)];
2342
2343 gdb_assert (len <= sizeof (buf));
2344 get_target_memory (ops, addr, buf, len);
2345 return extract_unsigned_integer (buf, len, byte_order);
2346 }
2347
2348 /* See target.h. */
2349
2350 int
2351 target_insert_breakpoint (struct gdbarch *gdbarch,
2352 struct bp_target_info *bp_tgt)
2353 {
2354 if (!may_insert_breakpoints)
2355 {
2356 warning (_("May not insert breakpoints"));
2357 return 1;
2358 }
2359
2360 return current_target.to_insert_breakpoint (&current_target,
2361 gdbarch, bp_tgt);
2362 }
2363
2364 /* See target.h. */
2365
2366 int
2367 target_remove_breakpoint (struct gdbarch *gdbarch,
2368 struct bp_target_info *bp_tgt)
2369 {
2370 /* This is kind of a weird case to handle, but the permission might
2371 have been changed after breakpoints were inserted - in which case
2372 we should just take the user literally and assume that any
2373 breakpoints should be left in place. */
2374 if (!may_insert_breakpoints)
2375 {
2376 warning (_("May not remove breakpoints"));
2377 return 1;
2378 }
2379
2380 return current_target.to_remove_breakpoint (&current_target,
2381 gdbarch, bp_tgt);
2382 }
2383
2384 static void
2385 target_info (char *args, int from_tty)
2386 {
2387 struct target_ops *t;
2388 int has_all_mem = 0;
2389
2390 if (symfile_objfile != NULL)
2391 printf_unfiltered (_("Symbols from \"%s\".\n"),
2392 objfile_name (symfile_objfile));
2393
2394 for (t = target_stack; t != NULL; t = t->beneath)
2395 {
2396 if (!(*t->to_has_memory) (t))
2397 continue;
2398
2399 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2400 continue;
2401 if (has_all_mem)
2402 printf_unfiltered (_("\tWhile running this, "
2403 "GDB does not access memory from...\n"));
2404 printf_unfiltered ("%s:\n", t->to_longname);
2405 (t->to_files_info) (t);
2406 has_all_mem = (*t->to_has_all_memory) (t);
2407 }
2408 }
2409
2410 /* This function is called before any new inferior is created, e.g.
2411 by running a program, attaching, or connecting to a target.
2412 It cleans up any state from previous invocations which might
2413 change between runs. This is a subset of what target_preopen
2414 resets (things which might change between targets). */
2415
2416 void
2417 target_pre_inferior (int from_tty)
2418 {
2419 /* Clear out solib state. Otherwise the solib state of the previous
2420 inferior might have survived and is entirely wrong for the new
2421 target. This has been observed on GNU/Linux using glibc 2.3. How
2422 to reproduce:
2423
2424 bash$ ./foo&
2425 [1] 4711
2426 bash$ ./foo&
2427 [1] 4712
2428 bash$ gdb ./foo
2429 [...]
2430 (gdb) attach 4711
2431 (gdb) detach
2432 (gdb) attach 4712
2433 Cannot access memory at address 0xdeadbeef
2434 */
2435
2436 /* In some OSs, the shared library list is the same/global/shared
2437 across inferiors. If code is shared between processes, so are
2438 memory regions and features. */
2439 if (!gdbarch_has_global_solist (target_gdbarch ()))
2440 {
2441 no_shared_libraries (NULL, from_tty);
2442
2443 invalidate_target_mem_regions ();
2444
2445 target_clear_description ();
2446 }
2447
2448 agent_capability_invalidate ();
2449 }
2450
2451 /* Callback for iterate_over_inferiors. Gets rid of the given
2452 inferior. */
2453
2454 static int
2455 dispose_inferior (struct inferior *inf, void *args)
2456 {
2457 struct thread_info *thread;
2458
2459 thread = any_thread_of_process (inf->pid);
2460 if (thread)
2461 {
2462 switch_to_thread (thread->ptid);
2463
2464 /* Core inferiors actually should be detached, not killed. */
2465 if (target_has_execution)
2466 target_kill ();
2467 else
2468 target_detach (NULL, 0);
2469 }
2470
2471 return 0;
2472 }
2473
2474 /* This is to be called by the open routine before it does
2475 anything. */
2476
2477 void
2478 target_preopen (int from_tty)
2479 {
2480 dont_repeat ();
2481
2482 if (have_inferiors ())
2483 {
2484 if (!from_tty
2485 || !have_live_inferiors ()
2486 || query (_("A program is being debugged already. Kill it? ")))
2487 iterate_over_inferiors (dispose_inferior, NULL);
2488 else
2489 error (_("Program not killed."));
2490 }
2491
2492 /* Calling target_kill may remove the target from the stack. But if
2493 it doesn't (which seems like a win for UDI), remove it now. */
2494 /* Leave the exec target, though. The user may be switching from a
2495 live process to a core of the same program. */
2496 pop_all_targets_above (file_stratum);
2497
2498 target_pre_inferior (from_tty);
2499 }
2500
2501 /* Detach a target after doing deferred register stores. */
2502
2503 void
2504 target_detach (const char *args, int from_tty)
2505 {
2506 struct target_ops* t;
2507
2508 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2509 /* Don't remove global breakpoints here. They're removed on
2510 disconnection from the target. */
2511 ;
2512 else
2513 /* If we're in breakpoints-always-inserted mode, have to remove
2514 them before detaching. */
2515 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2516
2517 prepare_for_detach ();
2518
2519 current_target.to_detach (&current_target, args, from_tty);
2520 if (targetdebug)
2521 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2522 args, from_tty);
2523 }
2524
2525 void
2526 target_disconnect (char *args, int from_tty)
2527 {
2528 struct target_ops *t;
2529
2530 /* If we're in breakpoints-always-inserted mode or if breakpoints
2531 are global across processes, we have to remove them before
2532 disconnecting. */
2533 remove_breakpoints ();
2534
2535 for (t = current_target.beneath; t != NULL; t = t->beneath)
2536 if (t->to_disconnect != NULL)
2537 {
2538 if (targetdebug)
2539 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2540 args, from_tty);
2541 t->to_disconnect (t, args, from_tty);
2542 return;
2543 }
2544
2545 tcomplain ();
2546 }
2547
2548 ptid_t
2549 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2550 {
2551 struct target_ops *t;
2552 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2553 status, options);
2554
2555 if (targetdebug)
2556 {
2557 char *status_string;
2558 char *options_string;
2559
2560 status_string = target_waitstatus_to_string (status);
2561 options_string = target_options_to_string (options);
2562 fprintf_unfiltered (gdb_stdlog,
2563 "target_wait (%d, status, options={%s})"
2564 " = %d, %s\n",
2565 ptid_get_pid (ptid), options_string,
2566 ptid_get_pid (retval), status_string);
2567 xfree (status_string);
2568 xfree (options_string);
2569 }
2570
2571 return retval;
2572 }
2573
2574 char *
2575 target_pid_to_str (ptid_t ptid)
2576 {
2577 struct target_ops *t;
2578
2579 for (t = current_target.beneath; t != NULL; t = t->beneath)
2580 {
2581 if (t->to_pid_to_str != NULL)
2582 return (*t->to_pid_to_str) (t, ptid);
2583 }
2584
2585 return normal_pid_to_str (ptid);
2586 }
2587
2588 char *
2589 target_thread_name (struct thread_info *info)
2590 {
2591 return current_target.to_thread_name (&current_target, info);
2592 }
2593
2594 void
2595 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2596 {
2597 struct target_ops *t;
2598
2599 target_dcache_invalidate ();
2600
2601 current_target.to_resume (&current_target, ptid, step, signal);
2602 if (targetdebug)
2603 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2604 ptid_get_pid (ptid),
2605 step ? "step" : "continue",
2606 gdb_signal_to_name (signal));
2607
2608 registers_changed_ptid (ptid);
2609 set_executing (ptid, 1);
2610 set_running (ptid, 1);
2611 clear_inline_frame_state (ptid);
2612 }
2613
2614 void
2615 target_pass_signals (int numsigs, unsigned char *pass_signals)
2616 {
2617 struct target_ops *t;
2618
2619 for (t = current_target.beneath; t != NULL; t = t->beneath)
2620 {
2621 if (t->to_pass_signals != NULL)
2622 {
2623 if (targetdebug)
2624 {
2625 int i;
2626
2627 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2628 numsigs);
2629
2630 for (i = 0; i < numsigs; i++)
2631 if (pass_signals[i])
2632 fprintf_unfiltered (gdb_stdlog, " %s",
2633 gdb_signal_to_name (i));
2634
2635 fprintf_unfiltered (gdb_stdlog, " })\n");
2636 }
2637
2638 (*t->to_pass_signals) (t, numsigs, pass_signals);
2639 return;
2640 }
2641 }
2642 }
2643
2644 void
2645 target_program_signals (int numsigs, unsigned char *program_signals)
2646 {
2647 struct target_ops *t;
2648
2649 for (t = current_target.beneath; t != NULL; t = t->beneath)
2650 {
2651 if (t->to_program_signals != NULL)
2652 {
2653 if (targetdebug)
2654 {
2655 int i;
2656
2657 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2658 numsigs);
2659
2660 for (i = 0; i < numsigs; i++)
2661 if (program_signals[i])
2662 fprintf_unfiltered (gdb_stdlog, " %s",
2663 gdb_signal_to_name (i));
2664
2665 fprintf_unfiltered (gdb_stdlog, " })\n");
2666 }
2667
2668 (*t->to_program_signals) (t, numsigs, program_signals);
2669 return;
2670 }
2671 }
2672 }
2673
2674 /* Look through the list of possible targets for a target that can
2675 follow forks. */
2676
2677 int
2678 target_follow_fork (int follow_child, int detach_fork)
2679 {
2680 struct target_ops *t;
2681
2682 for (t = current_target.beneath; t != NULL; t = t->beneath)
2683 {
2684 if (t->to_follow_fork != NULL)
2685 {
2686 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2687
2688 if (targetdebug)
2689 fprintf_unfiltered (gdb_stdlog,
2690 "target_follow_fork (%d, %d) = %d\n",
2691 follow_child, detach_fork, retval);
2692 return retval;
2693 }
2694 }
2695
2696 /* Some target returned a fork event, but did not know how to follow it. */
2697 internal_error (__FILE__, __LINE__,
2698 _("could not find a target to follow fork"));
2699 }
2700
2701 void
2702 target_mourn_inferior (void)
2703 {
2704 struct target_ops *t;
2705
2706 for (t = current_target.beneath; t != NULL; t = t->beneath)
2707 {
2708 if (t->to_mourn_inferior != NULL)
2709 {
2710 t->to_mourn_inferior (t);
2711 if (targetdebug)
2712 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2713
2714 /* We no longer need to keep handles on any of the object files.
2715 Make sure to release them to avoid unnecessarily locking any
2716 of them while we're not actually debugging. */
2717 bfd_cache_close_all ();
2718
2719 return;
2720 }
2721 }
2722
2723 internal_error (__FILE__, __LINE__,
2724 _("could not find a target to follow mourn inferior"));
2725 }
2726
2727 /* Look for a target which can describe architectural features, starting
2728 from TARGET. If we find one, return its description. */
2729
2730 const struct target_desc *
2731 target_read_description (struct target_ops *target)
2732 {
2733 struct target_ops *t;
2734
2735 for (t = target; t != NULL; t = t->beneath)
2736 if (t->to_read_description != NULL)
2737 {
2738 const struct target_desc *tdesc;
2739
2740 tdesc = t->to_read_description (t);
2741 if (tdesc)
2742 return tdesc;
2743 }
2744
2745 return NULL;
2746 }
2747
/* The default implementation of to_search_memory.
   This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search in on the
   target side with, for example, gdbserver).  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* The buffer holds one chunk plus PATTERN_LEN - 1 overlap bytes so a
     match straddling two chunks is still found.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  /* Translate the buffer offset of the hit back to a target
	     address.  */
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2855
2856 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2857 sequence of bytes in PATTERN with length PATTERN_LEN.
2858
2859 The result is 1 if found, 0 if not found, and -1 if there was an error
2860 requiring halting of the search (e.g. memory read error).
2861 If the pattern is found the address is recorded in FOUND_ADDRP. */
2862
2863 int
2864 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2865 const gdb_byte *pattern, ULONGEST pattern_len,
2866 CORE_ADDR *found_addrp)
2867 {
2868 struct target_ops *t;
2869 int found;
2870
2871 /* We don't use INHERIT to set current_target.to_search_memory,
2872 so we have to scan the target stack and handle targetdebug
2873 ourselves. */
2874
2875 if (targetdebug)
2876 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2877 hex_string (start_addr));
2878
2879 for (t = current_target.beneath; t != NULL; t = t->beneath)
2880 if (t->to_search_memory != NULL)
2881 break;
2882
2883 if (t != NULL)
2884 {
2885 found = t->to_search_memory (t, start_addr, search_space_len,
2886 pattern, pattern_len, found_addrp);
2887 }
2888 else
2889 {
2890 /* If a special version of to_search_memory isn't available, use the
2891 simple version. */
2892 found = simple_search_memory (current_target.beneath,
2893 start_addr, search_space_len,
2894 pattern, pattern_len, found_addrp);
2895 }
2896
2897 if (targetdebug)
2898 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2899
2900 return found;
2901 }
2902
2903 /* Look through the currently pushed targets. If none of them will
2904 be able to restart the currently running process, issue an error
2905 message. */
2906
2907 void
2908 target_require_runnable (void)
2909 {
2910 struct target_ops *t;
2911
2912 for (t = target_stack; t != NULL; t = t->beneath)
2913 {
2914 /* If this target knows how to create a new program, then
2915 assume we will still be able to after killing the current
2916 one. Either killing and mourning will not pop T, or else
2917 find_default_run_target will find it again. */
2918 if (t->to_create_inferior != NULL)
2919 return;
2920
2921 /* Do not worry about thread_stratum targets that can not
2922 create inferiors. Assume they will be pushed again if
2923 necessary, and continue to the process_stratum. */
2924 if (t->to_stratum == thread_stratum
2925 || t->to_stratum == arch_stratum)
2926 continue;
2927
2928 error (_("The \"%s\" target does not support \"run\". "
2929 "Try \"help target\" or \"continue\"."),
2930 t->to_shortname);
2931 }
2932
2933 /* This function is only called if the target is running. In that
2934 case there should have been a process_stratum target and it
2935 should either know how to create inferiors, or not... */
2936 internal_error (__FILE__, __LINE__, _("No targets found"));
2937 }
2938
2939 /* Look through the list of possible targets for a target that can
2940 execute a run or attach command without any other data. This is
2941 used to locate the default process stratum.
2942
2943 If DO_MESG is not NULL, the result is always valid (error() is
2944 called for errors); else, return NULL on error. */
2945
2946 static struct target_ops *
2947 find_default_run_target (char *do_mesg)
2948 {
2949 struct target_ops **t;
2950 struct target_ops *runable = NULL;
2951 int count;
2952
2953 count = 0;
2954
2955 for (t = target_structs; t < target_structs + target_struct_size;
2956 ++t)
2957 {
2958 if ((*t)->to_can_run && target_can_run (*t))
2959 {
2960 runable = *t;
2961 ++count;
2962 }
2963 }
2964
2965 if (count != 1)
2966 {
2967 if (do_mesg)
2968 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2969 else
2970 return NULL;
2971 }
2972
2973 return runable;
2974 }
2975
2976 void
2977 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2978 {
2979 struct target_ops *t;
2980
2981 t = find_default_run_target ("attach");
2982 (t->to_attach) (t, args, from_tty);
2983 return;
2984 }
2985
2986 void
2987 find_default_create_inferior (struct target_ops *ops,
2988 char *exec_file, char *allargs, char **env,
2989 int from_tty)
2990 {
2991 struct target_ops *t;
2992
2993 t = find_default_run_target ("run");
2994 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2995 return;
2996 }
2997
2998 static int
2999 find_default_can_async_p (struct target_ops *ignore)
3000 {
3001 struct target_ops *t;
3002
3003 /* This may be called before the target is pushed on the stack;
3004 look for the default process stratum. If there's none, gdb isn't
3005 configured with a native debugger, and target remote isn't
3006 connected yet. */
3007 t = find_default_run_target (NULL);
3008 if (t && t->to_can_async_p != delegate_can_async_p)
3009 return (t->to_can_async_p) (t);
3010 return 0;
3011 }
3012
3013 static int
3014 find_default_is_async_p (struct target_ops *ignore)
3015 {
3016 struct target_ops *t;
3017
3018 /* This may be called before the target is pushed on the stack;
3019 look for the default process stratum. If there's none, gdb isn't
3020 configured with a native debugger, and target remote isn't
3021 connected yet. */
3022 t = find_default_run_target (NULL);
3023 if (t && t->to_is_async_p != delegate_is_async_p)
3024 return (t->to_is_async_p) (t);
3025 return 0;
3026 }
3027
3028 static int
3029 find_default_supports_non_stop (struct target_ops *self)
3030 {
3031 struct target_ops *t;
3032
3033 t = find_default_run_target (NULL);
3034 if (t && t->to_supports_non_stop)
3035 return (t->to_supports_non_stop) (t);
3036 return 0;
3037 }
3038
3039 int
3040 target_supports_non_stop (void)
3041 {
3042 struct target_ops *t;
3043
3044 for (t = &current_target; t != NULL; t = t->beneath)
3045 if (t->to_supports_non_stop)
3046 return t->to_supports_non_stop (t);
3047
3048 return 0;
3049 }
3050
3051 /* Implement the "info proc" command. */
3052
3053 int
3054 target_info_proc (char *args, enum info_proc_what what)
3055 {
3056 struct target_ops *t;
3057
3058 /* If we're already connected to something that can get us OS
3059 related data, use it. Otherwise, try using the native
3060 target. */
3061 if (current_target.to_stratum >= process_stratum)
3062 t = current_target.beneath;
3063 else
3064 t = find_default_run_target (NULL);
3065
3066 for (; t != NULL; t = t->beneath)
3067 {
3068 if (t->to_info_proc != NULL)
3069 {
3070 t->to_info_proc (t, args, what);
3071
3072 if (targetdebug)
3073 fprintf_unfiltered (gdb_stdlog,
3074 "target_info_proc (\"%s\", %d)\n", args, what);
3075
3076 return 1;
3077 }
3078 }
3079
3080 return 0;
3081 }
3082
3083 static int
3084 find_default_supports_disable_randomization (struct target_ops *self)
3085 {
3086 struct target_ops *t;
3087
3088 t = find_default_run_target (NULL);
3089 if (t && t->to_supports_disable_randomization)
3090 return (t->to_supports_disable_randomization) (t);
3091 return 0;
3092 }
3093
3094 int
3095 target_supports_disable_randomization (void)
3096 {
3097 struct target_ops *t;
3098
3099 for (t = &current_target; t != NULL; t = t->beneath)
3100 if (t->to_supports_disable_randomization)
3101 return t->to_supports_disable_randomization (t);
3102
3103 return 0;
3104 }
3105
3106 char *
3107 target_get_osdata (const char *type)
3108 {
3109 struct target_ops *t;
3110
3111 /* If we're already connected to something that can get us OS
3112 related data, use it. Otherwise, try using the native
3113 target. */
3114 if (current_target.to_stratum >= process_stratum)
3115 t = current_target.beneath;
3116 else
3117 t = find_default_run_target ("get OS data");
3118
3119 if (!t)
3120 return NULL;
3121
3122 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3123 }
3124
3125 /* Determine the current address space of thread PTID. */
3126
3127 struct address_space *
3128 target_thread_address_space (ptid_t ptid)
3129 {
3130 struct address_space *aspace;
3131 struct inferior *inf;
3132 struct target_ops *t;
3133
3134 for (t = current_target.beneath; t != NULL; t = t->beneath)
3135 {
3136 if (t->to_thread_address_space != NULL)
3137 {
3138 aspace = t->to_thread_address_space (t, ptid);
3139 gdb_assert (aspace);
3140
3141 if (targetdebug)
3142 fprintf_unfiltered (gdb_stdlog,
3143 "target_thread_address_space (%s) = %d\n",
3144 target_pid_to_str (ptid),
3145 address_space_num (aspace));
3146 return aspace;
3147 }
3148 }
3149
3150 /* Fall-back to the "main" address space of the inferior. */
3151 inf = find_inferior_pid (ptid_get_pid (ptid));
3152
3153 if (inf == NULL || inf->aspace == NULL)
3154 internal_error (__FILE__, __LINE__,
3155 _("Can't determine the current "
3156 "address space of thread %s\n"),
3157 target_pid_to_str (ptid));
3158
3159 return inf->aspace;
3160 }
3161
3162
3163 /* Target file operations. */
3164
3165 static struct target_ops *
3166 default_fileio_target (void)
3167 {
3168 /* If we're already connected to something that can perform
3169 file I/O, use it. Otherwise, try using the native target. */
3170 if (current_target.to_stratum >= process_stratum)
3171 return current_target.beneath;
3172 else
3173 return find_default_run_target ("file I/O");
3174 }
3175
3176 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3177 target file descriptor, or -1 if an error occurs (and set
3178 *TARGET_ERRNO). */
3179 int
3180 target_fileio_open (const char *filename, int flags, int mode,
3181 int *target_errno)
3182 {
3183 struct target_ops *t;
3184
3185 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3186 {
3187 if (t->to_fileio_open != NULL)
3188 {
3189 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
3190
3191 if (targetdebug)
3192 fprintf_unfiltered (gdb_stdlog,
3193 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3194 filename, flags, mode,
3195 fd, fd != -1 ? 0 : *target_errno);
3196 return fd;
3197 }
3198 }
3199
3200 *target_errno = FILEIO_ENOSYS;
3201 return -1;
3202 }
3203
3204 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3205 Return the number of bytes written, or -1 if an error occurs
3206 (and set *TARGET_ERRNO). */
3207 int
3208 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3209 ULONGEST offset, int *target_errno)
3210 {
3211 struct target_ops *t;
3212
3213 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3214 {
3215 if (t->to_fileio_pwrite != NULL)
3216 {
3217 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
3218 target_errno);
3219
3220 if (targetdebug)
3221 fprintf_unfiltered (gdb_stdlog,
3222 "target_fileio_pwrite (%d,...,%d,%s) "
3223 "= %d (%d)\n",
3224 fd, len, pulongest (offset),
3225 ret, ret != -1 ? 0 : *target_errno);
3226 return ret;
3227 }
3228 }
3229
3230 *target_errno = FILEIO_ENOSYS;
3231 return -1;
3232 }
3233
3234 /* Read up to LEN bytes FD on the target into READ_BUF.
3235 Return the number of bytes read, or -1 if an error occurs
3236 (and set *TARGET_ERRNO). */
3237 int
3238 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3239 ULONGEST offset, int *target_errno)
3240 {
3241 struct target_ops *t;
3242
3243 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3244 {
3245 if (t->to_fileio_pread != NULL)
3246 {
3247 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
3248 target_errno);
3249
3250 if (targetdebug)
3251 fprintf_unfiltered (gdb_stdlog,
3252 "target_fileio_pread (%d,...,%d,%s) "
3253 "= %d (%d)\n",
3254 fd, len, pulongest (offset),
3255 ret, ret != -1 ? 0 : *target_errno);
3256 return ret;
3257 }
3258 }
3259
3260 *target_errno = FILEIO_ENOSYS;
3261 return -1;
3262 }
3263
3264 /* Close FD on the target. Return 0, or -1 if an error occurs
3265 (and set *TARGET_ERRNO). */
3266 int
3267 target_fileio_close (int fd, int *target_errno)
3268 {
3269 struct target_ops *t;
3270
3271 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3272 {
3273 if (t->to_fileio_close != NULL)
3274 {
3275 int ret = t->to_fileio_close (t, fd, target_errno);
3276
3277 if (targetdebug)
3278 fprintf_unfiltered (gdb_stdlog,
3279 "target_fileio_close (%d) = %d (%d)\n",
3280 fd, ret, ret != -1 ? 0 : *target_errno);
3281 return ret;
3282 }
3283 }
3284
3285 *target_errno = FILEIO_ENOSYS;
3286 return -1;
3287 }
3288
3289 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3290 occurs (and set *TARGET_ERRNO). */
3291 int
3292 target_fileio_unlink (const char *filename, int *target_errno)
3293 {
3294 struct target_ops *t;
3295
3296 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3297 {
3298 if (t->to_fileio_unlink != NULL)
3299 {
3300 int ret = t->to_fileio_unlink (t, filename, target_errno);
3301
3302 if (targetdebug)
3303 fprintf_unfiltered (gdb_stdlog,
3304 "target_fileio_unlink (%s) = %d (%d)\n",
3305 filename, ret, ret != -1 ? 0 : *target_errno);
3306 return ret;
3307 }
3308 }
3309
3310 *target_errno = FILEIO_ENOSYS;
3311 return -1;
3312 }
3313
3314 /* Read value of symbolic link FILENAME on the target. Return a
3315 null-terminated string allocated via xmalloc, or NULL if an error
3316 occurs (and set *TARGET_ERRNO). */
3317 char *
3318 target_fileio_readlink (const char *filename, int *target_errno)
3319 {
3320 struct target_ops *t;
3321
3322 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3323 {
3324 if (t->to_fileio_readlink != NULL)
3325 {
3326 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3327
3328 if (targetdebug)
3329 fprintf_unfiltered (gdb_stdlog,
3330 "target_fileio_readlink (%s) = %s (%d)\n",
3331 filename, ret? ret : "(nil)",
3332 ret? 0 : *target_errno);
3333 return ret;
3334 }
3335 }
3336
3337 *target_errno = FILEIO_ENOSYS;
3338 return NULL;
3339 }
3340
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE, discarding any close error.  */
static void
target_fileio_close_cleanup (void *opaque)
{
  int *fd_ptr = (int *) opaque;
  int ignored_errno;

  target_fileio_close (*fd_ptr, &ignored_errno);
}
3349
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Ensure the descriptor is closed on every exit path below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Reserve PADDING bytes at the end of the buffer so the caller
	 can append to the data (e.g. a NUL terminator).  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  Only hand the buffer to the caller
	     when something was actually transferred.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
3413
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* No extra padding is needed for a plain binary read.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3423
3424 /* Read target file FILENAME. The result is NUL-terminated and
3425 returned as a string, allocated using xmalloc. If an error occurs
3426 or the transfer is unsupported, NULL is returned. Empty objects
3427 are returned as allocated but empty strings. A warning is issued
3428 if the result contains any embedded NUL bytes. */
3429
3430 char *
3431 target_fileio_read_stralloc (const char *filename)
3432 {
3433 gdb_byte *buffer;
3434 char *bufstr;
3435 LONGEST i, transferred;
3436
3437 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3438 bufstr = (char *) buffer;
3439
3440 if (transferred < 0)
3441 return NULL;
3442
3443 if (transferred == 0)
3444 return xstrdup ("");
3445
3446 bufstr[transferred] = 0;
3447
3448 /* Check for embedded NUL bytes; but allow trailing NULs. */
3449 for (i = strlen (bufstr); i < transferred; i++)
3450 if (bufstr[i] != 0)
3451 {
3452 warning (_("target file %s "
3453 "contained unexpected null characters"),
3454 filename);
3455 break;
3456 }
3457
3458 return bufstr;
3459 }
3460
3461
/* Default implementation of to_region_ok_for_hw_watchpoint: accept a
   region only if it is no wider than a pointer on the target.  */
static int
default_region_ok_for_hw_watchpoint (struct target_ops *self,
				     CORE_ADDR addr, int len)
{
  return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
}
3468
3469 static int
3470 default_watchpoint_addr_within_range (struct target_ops *target,
3471 CORE_ADDR addr,
3472 CORE_ADDR start, int length)
3473 {
3474 return addr >= start && addr < start + length;
3475 }
3476
/* Default implementation of to_thread_architecture: every thread uses
   the target's main architecture.  */
static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}
3482
/* Generic stub returning zero; cast to the appropriate function-pointer
   type when installed into a target vector slot (see init_dummy_target).  */
static int
return_zero (void)
{
  return 0;
}
3488
/* Generic stub returning a null pointer; cast to the appropriate
   function-pointer type when installed into a target vector slot.  */
static void *
return_null (void)
{
  /* Use NULL rather than the bare integer constant 0 for a pointer
     return value.  */
  return NULL;
}
3494
/* Return the target immediately beneath T on the target stack.  */

struct target_ops *
find_target_beneath (struct target_ops *t)
{
  return t->beneath;
}
3504
3505 /* See target.h. */
3506
3507 struct target_ops *
3508 find_target_at (enum strata stratum)
3509 {
3510 struct target_ops *t;
3511
3512 for (t = current_target.beneath; t != NULL; t = t->beneath)
3513 if (t->to_stratum == stratum)
3514 return t;
3515
3516 return NULL;
3517 }
3518
3519 \f
/* The inferior process has died.  Long live the inferior!  Tear down
   all per-inferior state: breakpoints, threads, registers, frames.  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Clear inferior_ptid first so the teardown below does not operate
     on a supposedly-selected thread.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  /* Give the user-installed hook a chance to react, if any.  */
  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3554 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer; each call overwrites the previous result, so callers
   must copy it if they need it to persist.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3566
/* to_pid_to_str implementation for the dummy target: format PTID as a
   plain process ID.  */
static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3572
/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  /* error () throws, so this return only pacifies the compiler.  */
  return 0;
}
3581
/* Error-catcher for target_make_corefile_notes.  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  /* error () throws, so this return only pacifies the compiler.  */
  return NULL;
}
3590
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  /* The casts adapt the generic return_zero stub to each slot's exact
     function-pointer type.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining slot with a default method.  */
  install_dummy_methods (&dummy_target);
}
3616 \f
/* Debug-logging wrapper: perform the real to_open, then trace the
   call to gdb_stdlog.  */
static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3624
/* Close TARG.  It must not be on the target stack when closed.  */

void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  /* to_xclose takes precedence over to_close when both exist.  */
  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3638
/* Attach to the process described by ARGS via the current target,
   tracing the call if target debugging is enabled.  */
void
target_attach (char *args, int from_tty)
{
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
			args, from_tty);
}
3647
3648 int
3649 target_thread_alive (ptid_t ptid)
3650 {
3651 struct target_ops *t;
3652
3653 for (t = current_target.beneath; t != NULL; t = t->beneath)
3654 {
3655 if (t->to_thread_alive != NULL)
3656 {
3657 int retval;
3658
3659 retval = t->to_thread_alive (t, ptid);
3660 if (targetdebug)
3661 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3662 ptid_get_pid (ptid), retval);
3663
3664 return retval;
3665 }
3666 }
3667
3668 return 0;
3669 }
3670
3671 void
3672 target_find_new_threads (void)
3673 {
3674 struct target_ops *t;
3675
3676 for (t = current_target.beneath; t != NULL; t = t->beneath)
3677 {
3678 if (t->to_find_new_threads != NULL)
3679 {
3680 t->to_find_new_threads (t);
3681 if (targetdebug)
3682 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3683
3684 return;
3685 }
3686 }
3687 }
3688
/* Stop the thread(s) identified by PTID, unless the "may-stop"
   setting forbids interrupting the target.  */
void
target_stop (ptid_t ptid)
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  (*current_target.to_stop) (&current_target, ptid);
}
3700
/* Debug-logging wrapper: delegate to the real to_post_attach, then
   trace the call to gdb_stdlog.  */
static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3708
3709 /* Concatenate ELEM to LIST, a comma separate list, and return the
3710 result. The LIST incoming argument is released. */
3711
3712 static char *
3713 str_comma_list_concat_elem (char *list, const char *elem)
3714 {
3715 if (list == NULL)
3716 return xstrdup (elem);
3717 else
3718 return reconcat (list, list, ", ", elem, (char *) NULL);
3719 }
3720
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  int present = (*target_options & opt) != 0;

  if (present)
    {
      /* Consume the bit and record its name.  */
      *target_options &= ~opt;
      ret = str_comma_list_concat_elem (ret, opt_str);
    }

  return ret;
}
3738
/* Return a freshly-allocated, comma-separated string naming the bits
   set in TARGET_OPTIONS; the caller owns (and must xfree) it.  */

char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  /* Any bits not consumed by the DO_TARG_OPTION calls above are
     unrecognized.  */
  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3756
/* Log a register access made by FUNC to gdb_stdlog: the register's
   name (or number), its raw bytes, and, when it fits in a LONGEST,
   its value in hex and decimal.  */
static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when a non-empty one exists.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  /* Only raw registers have contents we can dump here.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* Registers no wider than a LONGEST are also shown as an
	 integer, honoring the target's byte order.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3793
3794 void
3795 target_fetch_registers (struct regcache *regcache, int regno)
3796 {
3797 struct target_ops *t;
3798
3799 for (t = current_target.beneath; t != NULL; t = t->beneath)
3800 {
3801 if (t->to_fetch_registers != NULL)
3802 {
3803 t->to_fetch_registers (t, regcache, regno);
3804 if (targetdebug)
3805 debug_print_register ("target_fetch_registers", regcache, regno);
3806 return;
3807 }
3808 }
3809 }
3810
3811 void
3812 target_store_registers (struct regcache *regcache, int regno)
3813 {
3814 struct target_ops *t;
3815
3816 if (!may_write_registers)
3817 error (_("Writing to registers is not allowed (regno %d)"), regno);
3818
3819 current_target.to_store_registers (&current_target, regcache, regno);
3820 if (targetdebug)
3821 {
3822 debug_print_register ("target_store_registers", regcache, regno);
3823 }
3824 }
3825
3826 int
3827 target_core_of_thread (ptid_t ptid)
3828 {
3829 struct target_ops *t;
3830
3831 for (t = current_target.beneath; t != NULL; t = t->beneath)
3832 {
3833 if (t->to_core_of_thread != NULL)
3834 {
3835 int retval = t->to_core_of_thread (t, ptid);
3836
3837 if (targetdebug)
3838 fprintf_unfiltered (gdb_stdlog,
3839 "target_core_of_thread (%d) = %d\n",
3840 ptid_get_pid (ptid), retval);
3841 return retval;
3842 }
3843 }
3844
3845 return -1;
3846 }
3847
3848 int
3849 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3850 {
3851 struct target_ops *t;
3852
3853 for (t = current_target.beneath; t != NULL; t = t->beneath)
3854 {
3855 if (t->to_verify_memory != NULL)
3856 {
3857 int retval = t->to_verify_memory (t, data, memaddr, size);
3858
3859 if (targetdebug)
3860 fprintf_unfiltered (gdb_stdlog,
3861 "target_verify_memory (%s, %s) = %d\n",
3862 paddress (target_gdbarch (), memaddr),
3863 pulongest (size),
3864 retval);
3865 return retval;
3866 }
3867 }
3868
3869 tcomplain ();
3870 }
3871
3872 /* The documentation for this function is in its prototype declaration in
3873 target.h. */
3874
3875 int
3876 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3877 {
3878 struct target_ops *t;
3879
3880 for (t = current_target.beneath; t != NULL; t = t->beneath)
3881 if (t->to_insert_mask_watchpoint != NULL)
3882 {
3883 int ret;
3884
3885 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
3886
3887 if (targetdebug)
3888 fprintf_unfiltered (gdb_stdlog, "\
3889 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3890 core_addr_to_string (addr),
3891 core_addr_to_string (mask), rw, ret);
3892
3893 return ret;
3894 }
3895
3896 return 1;
3897 }
3898
3899 /* The documentation for this function is in its prototype declaration in
3900 target.h. */
3901
3902 int
3903 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3904 {
3905 struct target_ops *t;
3906
3907 for (t = current_target.beneath; t != NULL; t = t->beneath)
3908 if (t->to_remove_mask_watchpoint != NULL)
3909 {
3910 int ret;
3911
3912 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
3913
3914 if (targetdebug)
3915 fprintf_unfiltered (gdb_stdlog, "\
3916 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
3917 core_addr_to_string (addr),
3918 core_addr_to_string (mask), rw, ret);
3919
3920 return ret;
3921 }
3922
3923 return 1;
3924 }
3925
3926 /* The documentation for this function is in its prototype declaration
3927 in target.h. */
3928
3929 int
3930 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
3931 {
3932 struct target_ops *t;
3933
3934 for (t = current_target.beneath; t != NULL; t = t->beneath)
3935 if (t->to_masked_watch_num_registers != NULL)
3936 return t->to_masked_watch_num_registers (t, addr, mask);
3937
3938 return -1;
3939 }
3940
3941 /* The documentation for this function is in its prototype declaration
3942 in target.h. */
3943
3944 int
3945 target_ranged_break_num_registers (void)
3946 {
3947 struct target_ops *t;
3948
3949 for (t = current_target.beneath; t != NULL; t = t->beneath)
3950 if (t->to_ranged_break_num_registers != NULL)
3951 return t->to_ranged_break_num_registers (t);
3952
3953 return -1;
3954 }
3955
3956 /* See target.h. */
3957
3958 struct btrace_target_info *
3959 target_enable_btrace (ptid_t ptid)
3960 {
3961 struct target_ops *t;
3962
3963 for (t = current_target.beneath; t != NULL; t = t->beneath)
3964 if (t->to_enable_btrace != NULL)
3965 return t->to_enable_btrace (t, ptid);
3966
3967 tcomplain ();
3968 return NULL;
3969 }
3970
3971 /* See target.h. */
3972
3973 void
3974 target_disable_btrace (struct btrace_target_info *btinfo)
3975 {
3976 struct target_ops *t;
3977
3978 for (t = current_target.beneath; t != NULL; t = t->beneath)
3979 if (t->to_disable_btrace != NULL)
3980 {
3981 t->to_disable_btrace (t, btinfo);
3982 return;
3983 }
3984
3985 tcomplain ();
3986 }
3987
3988 /* See target.h. */
3989
3990 void
3991 target_teardown_btrace (struct btrace_target_info *btinfo)
3992 {
3993 struct target_ops *t;
3994
3995 for (t = current_target.beneath; t != NULL; t = t->beneath)
3996 if (t->to_teardown_btrace != NULL)
3997 {
3998 t->to_teardown_btrace (t, btinfo);
3999 return;
4000 }
4001
4002 tcomplain ();
4003 }
4004
4005 /* See target.h. */
4006
4007 enum btrace_error
4008 target_read_btrace (VEC (btrace_block_s) **btrace,
4009 struct btrace_target_info *btinfo,
4010 enum btrace_read_type type)
4011 {
4012 struct target_ops *t;
4013
4014 for (t = current_target.beneath; t != NULL; t = t->beneath)
4015 if (t->to_read_btrace != NULL)
4016 return t->to_read_btrace (t, btrace, btinfo, type);
4017
4018 tcomplain ();
4019 return BTRACE_ERR_NOT_SUPPORTED;
4020 }
4021
4022 /* See target.h. */
4023
4024 void
4025 target_stop_recording (void)
4026 {
4027 struct target_ops *t;
4028
4029 for (t = current_target.beneath; t != NULL; t = t->beneath)
4030 if (t->to_stop_recording != NULL)
4031 {
4032 t->to_stop_recording (t);
4033 return;
4034 }
4035
4036 /* This is optional. */
4037 }
4038
4039 /* See target.h. */
4040
4041 void
4042 target_info_record (void)
4043 {
4044 struct target_ops *t;
4045
4046 for (t = current_target.beneath; t != NULL; t = t->beneath)
4047 if (t->to_info_record != NULL)
4048 {
4049 t->to_info_record (t);
4050 return;
4051 }
4052
4053 tcomplain ();
4054 }
4055
4056 /* See target.h. */
4057
4058 void
4059 target_save_record (const char *filename)
4060 {
4061 struct target_ops *t;
4062
4063 for (t = current_target.beneath; t != NULL; t = t->beneath)
4064 if (t->to_save_record != NULL)
4065 {
4066 t->to_save_record (t, filename);
4067 return;
4068 }
4069
4070 tcomplain ();
4071 }
4072
4073 /* See target.h. */
4074
4075 int
4076 target_supports_delete_record (void)
4077 {
4078 struct target_ops *t;
4079
4080 for (t = current_target.beneath; t != NULL; t = t->beneath)
4081 if (t->to_delete_record != NULL)
4082 return 1;
4083
4084 return 0;
4085 }
4086
4087 /* See target.h. */
4088
4089 void
4090 target_delete_record (void)
4091 {
4092 struct target_ops *t;
4093
4094 for (t = current_target.beneath; t != NULL; t = t->beneath)
4095 if (t->to_delete_record != NULL)
4096 {
4097 t->to_delete_record (t);
4098 return;
4099 }
4100
4101 tcomplain ();
4102 }
4103
4104 /* See target.h. */
4105
4106 int
4107 target_record_is_replaying (void)
4108 {
4109 struct target_ops *t;
4110
4111 for (t = current_target.beneath; t != NULL; t = t->beneath)
4112 if (t->to_record_is_replaying != NULL)
4113 return t->to_record_is_replaying (t);
4114
4115 return 0;
4116 }
4117
4118 /* See target.h. */
4119
4120 void
4121 target_goto_record_begin (void)
4122 {
4123 struct target_ops *t;
4124
4125 for (t = current_target.beneath; t != NULL; t = t->beneath)
4126 if (t->to_goto_record_begin != NULL)
4127 {
4128 t->to_goto_record_begin (t);
4129 return;
4130 }
4131
4132 tcomplain ();
4133 }
4134
4135 /* See target.h. */
4136
4137 void
4138 target_goto_record_end (void)
4139 {
4140 struct target_ops *t;
4141
4142 for (t = current_target.beneath; t != NULL; t = t->beneath)
4143 if (t->to_goto_record_end != NULL)
4144 {
4145 t->to_goto_record_end (t);
4146 return;
4147 }
4148
4149 tcomplain ();
4150 }
4151
4152 /* See target.h. */
4153
4154 void
4155 target_goto_record (ULONGEST insn)
4156 {
4157 struct target_ops *t;
4158
4159 for (t = current_target.beneath; t != NULL; t = t->beneath)
4160 if (t->to_goto_record != NULL)
4161 {
4162 t->to_goto_record (t, insn);
4163 return;
4164 }
4165
4166 tcomplain ();
4167 }
4168
4169 /* See target.h. */
4170
4171 void
4172 target_insn_history (int size, int flags)
4173 {
4174 struct target_ops *t;
4175
4176 for (t = current_target.beneath; t != NULL; t = t->beneath)
4177 if (t->to_insn_history != NULL)
4178 {
4179 t->to_insn_history (t, size, flags);
4180 return;
4181 }
4182
4183 tcomplain ();
4184 }
4185
4186 /* See target.h. */
4187
4188 void
4189 target_insn_history_from (ULONGEST from, int size, int flags)
4190 {
4191 struct target_ops *t;
4192
4193 for (t = current_target.beneath; t != NULL; t = t->beneath)
4194 if (t->to_insn_history_from != NULL)
4195 {
4196 t->to_insn_history_from (t, from, size, flags);
4197 return;
4198 }
4199
4200 tcomplain ();
4201 }
4202
4203 /* See target.h. */
4204
4205 void
4206 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4207 {
4208 struct target_ops *t;
4209
4210 for (t = current_target.beneath; t != NULL; t = t->beneath)
4211 if (t->to_insn_history_range != NULL)
4212 {
4213 t->to_insn_history_range (t, begin, end, flags);
4214 return;
4215 }
4216
4217 tcomplain ();
4218 }
4219
4220 /* See target.h. */
4221
4222 void
4223 target_call_history (int size, int flags)
4224 {
4225 struct target_ops *t;
4226
4227 for (t = current_target.beneath; t != NULL; t = t->beneath)
4228 if (t->to_call_history != NULL)
4229 {
4230 t->to_call_history (t, size, flags);
4231 return;
4232 }
4233
4234 tcomplain ();
4235 }
4236
4237 /* See target.h. */
4238
4239 void
4240 target_call_history_from (ULONGEST begin, int size, int flags)
4241 {
4242 struct target_ops *t;
4243
4244 for (t = current_target.beneath; t != NULL; t = t->beneath)
4245 if (t->to_call_history_from != NULL)
4246 {
4247 t->to_call_history_from (t, begin, size, flags);
4248 return;
4249 }
4250
4251 tcomplain ();
4252 }
4253
4254 /* See target.h. */
4255
4256 void
4257 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4258 {
4259 struct target_ops *t;
4260
4261 for (t = current_target.beneath; t != NULL; t = t->beneath)
4262 if (t->to_call_history_range != NULL)
4263 {
4264 t->to_call_history_range (t, begin, end, flags);
4265 return;
4266 }
4267
4268 tcomplain ();
4269 }
4270
/* Debug-logging wrapper: delegate to the real to_prepare_to_store,
   then trace the call to gdb_stdlog.  */
static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
4278
4279 /* See target.h. */
4280
4281 const struct frame_unwind *
4282 target_get_unwinder (void)
4283 {
4284 struct target_ops *t;
4285
4286 for (t = current_target.beneath; t != NULL; t = t->beneath)
4287 if (t->to_get_unwinder != NULL)
4288 return t->to_get_unwinder;
4289
4290 return NULL;
4291 }
4292
4293 /* See target.h. */
4294
4295 const struct frame_unwind *
4296 target_get_tailcall_unwinder (void)
4297 {
4298 struct target_ops *t;
4299
4300 for (t = current_target.beneath; t != NULL; t = t->beneath)
4301 if (t->to_get_tailcall_unwinder != NULL)
4302 return t->to_get_tailcall_unwinder;
4303
4304 return NULL;
4305 }
4306
4307 /* See target.h. */
4308
4309 CORE_ADDR
4310 forward_target_decr_pc_after_break (struct target_ops *ops,
4311 struct gdbarch *gdbarch)
4312 {
4313 for (; ops != NULL; ops = ops->beneath)
4314 if (ops->to_decr_pc_after_break != NULL)
4315 return ops->to_decr_pc_after_break (ops, gdbarch);
4316
4317 return gdbarch_decr_pc_after_break (gdbarch);
4318 }
4319
/* See target.h.  */

CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  /* Search from the top of the current target stack.  */
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
4327
/* Debug-logging wrapper around deprecated_xfer_memory: perform the
   transfer, then log the request and (at higher verbosity) a hex dump
   of the bytes moved.  */
static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the dump into rows whenever the host address of the
	     byte being printed is 16-byte aligned.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* At verbosity below 2, emit only the first row.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4368
/* Debug-logging wrapper: delegate to the real to_files_info, then
   trace the call to gdb_stdlog.  */
static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4376
/* Debug-logging wrapper: delegate to the real to_insert_breakpoint,
   then trace the call and its result to gdb_stdlog.  */
static int
debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4391
/* Debug-logging wrapper: delegate to the real to_remove_breakpoint,
   then trace the call and its result to gdb_stdlog.  */
static int
debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4406
4407 static int
4408 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4409 int type, int cnt, int from_tty)
4410 {
4411 int retval;
4412
4413 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4414 type, cnt, from_tty);
4415
4416 fprintf_unfiltered (gdb_stdlog,
4417 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4418 (unsigned long) type,
4419 (unsigned long) cnt,
4420 (unsigned long) from_tty,
4421 (unsigned long) retval);
4422 return retval;
4423 }
4424
4425 static int
4426 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4427 CORE_ADDR addr, int len)
4428 {
4429 CORE_ADDR retval;
4430
4431 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4432 addr, len);
4433
4434 fprintf_unfiltered (gdb_stdlog,
4435 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4436 core_addr_to_string (addr), (unsigned long) len,
4437 core_addr_to_string (retval));
4438 return retval;
4439 }
4440
/* Debug-logging wrapper: delegate to the real
   to_can_accel_watchpoint_condition, then trace the call and its
   result to gdb_stdlog.  */
static int
debug_to_can_accel_watchpoint_condition (struct target_ops *self,
					 CORE_ADDR addr, int len, int rw,
					 struct expression *cond)
{
  int retval;

  retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
							   addr, len,
							   rw, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_accel_watchpoint_condition "
		      "(%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, rw,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4459
/* Debug-logging wrapper: delegate to the real to_stopped_by_watchpoint,
   then trace the call and its result to gdb_stdlog.  */
static int
debug_to_stopped_by_watchpoint (struct target_ops *ops)
{
  int retval;

  retval = debug_target.to_stopped_by_watchpoint (&debug_target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_by_watchpoint () = %ld\n",
		      (unsigned long) retval);
  return retval;
}
4472
/* Debug-logging wrapper: delegate to the real to_stopped_data_address,
   then trace the call and its result to gdb_stdlog.
   NOTE(review): *ADDR is printed even when the method returned 0, in
   which case it may not have been set — verify against callees.  */
static int
debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
{
  int retval;

  retval = debug_target.to_stopped_data_address (target, addr);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_data_address ([%s]) = %ld\n",
		      core_addr_to_string (*addr),
		      (unsigned long)retval);
  return retval;
}
4486
4487 static int
4488 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4489 CORE_ADDR addr,
4490 CORE_ADDR start, int length)
4491 {
4492 int retval;
4493
4494 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4495 start, length);
4496
4497 fprintf_filtered (gdb_stdlog,
4498 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4499 core_addr_to_string (addr), core_addr_to_string (start),
4500 length, retval);
4501 return retval;
4502 }
4503
4504 static int
4505 debug_to_insert_hw_breakpoint (struct target_ops *self,
4506 struct gdbarch *gdbarch,
4507 struct bp_target_info *bp_tgt)
4508 {
4509 int retval;
4510
4511 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4512 gdbarch, bp_tgt);
4513
4514 fprintf_unfiltered (gdb_stdlog,
4515 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4516 core_addr_to_string (bp_tgt->placed_address),
4517 (unsigned long) retval);
4518 return retval;
4519 }
4520
4521 static int
4522 debug_to_remove_hw_breakpoint (struct target_ops *self,
4523 struct gdbarch *gdbarch,
4524 struct bp_target_info *bp_tgt)
4525 {
4526 int retval;
4527
4528 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4529 gdbarch, bp_tgt);
4530
4531 fprintf_unfiltered (gdb_stdlog,
4532 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4533 core_addr_to_string (bp_tgt->placed_address),
4534 (unsigned long) retval);
4535 return retval;
4536 }
4537
4538 static int
4539 debug_to_insert_watchpoint (struct target_ops *self,
4540 CORE_ADDR addr, int len, int type,
4541 struct expression *cond)
4542 {
4543 int retval;
4544
4545 retval = debug_target.to_insert_watchpoint (&debug_target,
4546 addr, len, type, cond);
4547
4548 fprintf_unfiltered (gdb_stdlog,
4549 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4550 core_addr_to_string (addr), len, type,
4551 host_address_to_string (cond), (unsigned long) retval);
4552 return retval;
4553 }
4554
4555 static int
4556 debug_to_remove_watchpoint (struct target_ops *self,
4557 CORE_ADDR addr, int len, int type,
4558 struct expression *cond)
4559 {
4560 int retval;
4561
4562 retval = debug_target.to_remove_watchpoint (&debug_target,
4563 addr, len, type, cond);
4564
4565 fprintf_unfiltered (gdb_stdlog,
4566 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4567 core_addr_to_string (addr), len, type,
4568 host_address_to_string (cond), (unsigned long) retval);
4569 return retval;
4570 }
4571
4572 static void
4573 debug_to_terminal_init (struct target_ops *self)
4574 {
4575 debug_target.to_terminal_init (&debug_target);
4576
4577 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4578 }
4579
4580 static void
4581 debug_to_terminal_inferior (struct target_ops *self)
4582 {
4583 debug_target.to_terminal_inferior (&debug_target);
4584
4585 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4586 }
4587
4588 static void
4589 debug_to_terminal_ours_for_output (struct target_ops *self)
4590 {
4591 debug_target.to_terminal_ours_for_output (&debug_target);
4592
4593 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4594 }
4595
4596 static void
4597 debug_to_terminal_ours (struct target_ops *self)
4598 {
4599 debug_target.to_terminal_ours (&debug_target);
4600
4601 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4602 }
4603
4604 static void
4605 debug_to_terminal_save_ours (struct target_ops *self)
4606 {
4607 debug_target.to_terminal_save_ours (&debug_target);
4608
4609 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4610 }
4611
/* Logging wrapper for to_terminal_info: delegate to the real target
   saved in DEBUG_TARGET, then trace the call on gdb_stdlog.  */

static void
debug_to_terminal_info (struct target_ops *self,
			const char *arg, int from_tty)
{
  debug_target.to_terminal_info (&debug_target, arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
4621
/* Logging wrapper for to_load: delegate to the real target saved in
   DEBUG_TARGET, then trace the call on gdb_stdlog.  */

static void
debug_to_load (struct target_ops *self, char *args, int from_tty)
{
  debug_target.to_load (&debug_target, args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4629
/* Logging wrapper for to_post_startup_inferior: delegate to the real
   target saved in DEBUG_TARGET, then log PTID's pid.  */

static void
debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_post_startup_inferior (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      ptid_get_pid (ptid));
}
4638
4639 static int
4640 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4641 {
4642 int retval;
4643
4644 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4645
4646 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4647 pid, retval);
4648
4649 return retval;
4650 }
4651
4652 static int
4653 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4654 {
4655 int retval;
4656
4657 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4658
4659 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4660 pid, retval);
4661
4662 return retval;
4663 }
4664
4665 static int
4666 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4667 {
4668 int retval;
4669
4670 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4671
4672 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4673 pid, retval);
4674
4675 return retval;
4676 }
4677
4678 static int
4679 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4680 {
4681 int retval;
4682
4683 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4684
4685 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4686 pid, retval);
4687
4688 return retval;
4689 }
4690
4691 static int
4692 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4693 {
4694 int retval;
4695
4696 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4697
4698 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4699 pid, retval);
4700
4701 return retval;
4702 }
4703
4704 static int
4705 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4706 {
4707 int retval;
4708
4709 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4710
4711 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4712 pid, retval);
4713
4714 return retval;
4715 }
4716
4717 static int
4718 debug_to_has_exited (struct target_ops *self,
4719 int pid, int wait_status, int *exit_status)
4720 {
4721 int has_exited;
4722
4723 has_exited = debug_target.to_has_exited (&debug_target,
4724 pid, wait_status, exit_status);
4725
4726 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4727 pid, wait_status, *exit_status, has_exited);
4728
4729 return has_exited;
4730 }
4731
4732 static int
4733 debug_to_can_run (struct target_ops *self)
4734 {
4735 int retval;
4736
4737 retval = debug_target.to_can_run (&debug_target);
4738
4739 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4740
4741 return retval;
4742 }
4743
4744 static struct gdbarch *
4745 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4746 {
4747 struct gdbarch *retval;
4748
4749 retval = debug_target.to_thread_architecture (ops, ptid);
4750
4751 fprintf_unfiltered (gdb_stdlog,
4752 "target_thread_architecture (%s) = %s [%s]\n",
4753 target_pid_to_str (ptid),
4754 host_address_to_string (retval),
4755 gdbarch_bfd_arch_info (retval)->printable_name);
4756 return retval;
4757 }
4758
/* Logging wrapper for to_stop: delegate to the real target saved in
   DEBUG_TARGET, then trace the call on gdb_stdlog.  */

static void
debug_to_stop (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_stop (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
4767
/* Logging wrapper for to_rcmd: delegate to the real target saved in
   DEBUG_TARGET, then log the command string (output goes to OUTBUF,
   so only "..." is logged for it).  */

static void
debug_to_rcmd (struct target_ops *self, char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (&debug_target, command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
4775
4776 static char *
4777 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4778 {
4779 char *exec_file;
4780
4781 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4782
4783 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4784 pid, exec_file);
4785
4786 return exec_file;
4787 }
4788
/* Interpose the debug_to_* logging wrappers on the current target.
   A snapshot of the fully-composed CURRENT_TARGET is first saved in
   DEBUG_TARGET; each wrapper delegates to the method in that snapshot
   and echoes the call and result to gdb_stdlog.  Only the methods
   assigned below are wrapped.  */

static void
setup_target_debug (void)
{
  /* Save the unwrapped vector; the wrappers forward to this copy.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4836 \f
4837
/* Help text shared by the "info target" and "info files" commands
   (registered in initialize_targets).  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4842
/* Default implementation of to_rcmd: the "monitor" command is only
   supported by targets that install their own method.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4848
/* Implementation of the "monitor" command: pass CMD verbatim to the
   current target's to_rcmd method, sending output to gdb_stdtarg.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
4855
/* Print the name of each layer of our target stack.  */
4857
4858 static void
4859 maintenance_print_target_stack (char *cmd, int from_tty)
4860 {
4861 struct target_ops *t;
4862
4863 printf_filtered (_("The current target stack is:\n"));
4864
4865 for (t = target_stack; t != NULL; t = t->beneath)
4866 {
4867 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4868 }
4869 }
4870
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated (see
   set_target_async_command below).  */
static int target_async_permitted_1 = 0;
4877
/* Implementation of "set target-async".  While there are live
   inferiors, revert the user-edited shadow variable (so "show" stays
   truthful) and error out; otherwise copy the shadow into
   target_async_permitted.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the user's edit before refusing the change.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
4890
/* Implementation of "show target-async".  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4900
/* Temporary copies of permission settings.  These shadows are what
   the "set may-*" commands edit; set_target_permissions and
   set_write_memory_permission copy them into the real flags.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4909
/* Make the user-set values match the real values again.  Copies each
   real permission flag back into its _1 shadow, i.e. the variable the
   corresponding "set may-*" command edits.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4922
/* The one function handles (most of) the permission flags in the same
   way.  (may-write-memory is handled separately by
   set_write_memory_permission, independently of observer mode.)  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the user-edited shadow variables before refusing.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
4944
/* Set memory write permission independently of observer mode.  Unlike
   set_target_permissions, this applies even while the inferior is
   running.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
4955
4956
/* Install the initial dummy target and register all target-related
   commands: "info target"/"info files", "set/show debug target",
   "set/show trust-readonly-sections", "monitor",
   "maint print target-stack", "set/show target-async", and the
   "may-*" permission set/show commands.  */

void
initialize_targets (void)
{
  /* The dummy target sits at the bottom of the stack forever.  */
  init_dummy_target ();
  push_target (&dummy_target);

  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The "may-*" commands edit the _1 shadow variables; their set
     hooks copy the values into the real flags.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}