1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2013 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include "gdb_string.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "symtab.h"
28 #include "inferior.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "dcache.h"
33 #include <signal.h>
34 #include "regcache.h"
35 #include "gdb_assert.h"
36 #include "gdbcore.h"
37 #include "exceptions.h"
38 #include "target-descriptions.h"
39 #include "gdbthread.h"
40 #include "solib.h"
41 #include "exec.h"
42 #include "inline-frame.h"
43 #include "tracepoint.h"
44 #include "gdb/fileio.h"
45 #include "agent.h"
46
47 static void target_info (char *, int);
48
49 static void default_terminal_info (const char *, int);
50
51 static int default_watchpoint_addr_within_range (struct target_ops *,
52 CORE_ADDR, CORE_ADDR, int);
53
54 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
55
56 static void tcomplain (void) ATTRIBUTE_NORETURN;
57
58 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
59
60 static int return_zero (void);
61
62 static int return_one (void);
63
64 static int return_minus_one (void);
65
66 void target_ignore (void);
67
68 static void target_command (char *, int);
69
70 static struct target_ops *find_default_run_target (char *);
71
72 static LONGEST default_xfer_partial (struct target_ops *ops,
73 enum target_object object,
74 const char *annex, gdb_byte *readbuf,
75 const gdb_byte *writebuf,
76 ULONGEST offset, LONGEST len);
77
78 static LONGEST current_xfer_partial (struct target_ops *ops,
79 enum target_object object,
80 const char *annex, gdb_byte *readbuf,
81 const gdb_byte *writebuf,
82 ULONGEST offset, LONGEST len);
83
84 static LONGEST target_xfer_partial (struct target_ops *ops,
85 enum target_object object,
86 const char *annex,
87 void *readbuf, const void *writebuf,
88 ULONGEST offset, LONGEST len);
89
90 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
91 ptid_t ptid);
92
93 static void init_dummy_target (void);
94
95 static struct target_ops debug_target;
96
97 static void debug_to_open (char *, int);
98
99 static void debug_to_prepare_to_store (struct regcache *);
100
101 static void debug_to_files_info (struct target_ops *);
102
103 static int debug_to_insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *);
105
106 static int debug_to_remove_breakpoint (struct gdbarch *,
107 struct bp_target_info *);
108
109 static int debug_to_can_use_hw_breakpoint (int, int, int);
110
111 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
112 struct bp_target_info *);
113
114 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
118 struct expression *);
119
120 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
121 struct expression *);
122
123 static int debug_to_stopped_by_watchpoint (void);
124
125 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
126
127 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
128 CORE_ADDR, CORE_ADDR, int);
129
130 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
131
132 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
133 struct expression *);
134
135 static void debug_to_terminal_init (void);
136
137 static void debug_to_terminal_inferior (void);
138
139 static void debug_to_terminal_ours_for_output (void);
140
141 static void debug_to_terminal_save_ours (void);
142
143 static void debug_to_terminal_ours (void);
144
145 static void debug_to_load (char *, int);
146
147 static int debug_to_can_run (void);
148
149 static void debug_to_stop (ptid_t);
150
151 /* Pointer to the array of target structures; the number of entries
152 currently in use in the array; and the allocated size of the
153 array. */
154 struct target_ops **target_structs;
155 unsigned target_struct_size;
156 unsigned target_struct_allocsize;
157 #define DEFAULT_ALLOCSIZE 10
158
159 /* The initial current target, so that there is always a semi-valid
160 current target. */
161
162 static struct target_ops dummy_target;
163
164 /* Top of target stack. */
165
166 static struct target_ops *target_stack;
167
168 /* The target structure we are currently using to talk to a process
169 or file or whatever "inferior" we have. */
170
171 struct target_ops current_target;
172
173 /* Command list for target. */
174
175 static struct cmd_list_element *targetlist = NULL;
176
177 /* Nonzero if we should trust readonly sections from the
178 executable when reading memory. */
179
180 static int trust_readonly = 0;
181
182 /* Nonzero if we should show true memory content including
183 memory breakpoints inserted by GDB. */
184
185 static int show_memory_breakpoints = 0;
186
187 /* These globals control whether GDB attempts to perform these
188 operations; they are useful for targets that need to prevent
189 inadvertent disruption, such as in non-stop mode. */
190
191 int may_write_registers = 1;
192
193 int may_write_memory = 1;
194
195 int may_insert_breakpoints = 1;
196
197 int may_insert_tracepoints = 1;
198
199 int may_insert_fast_tracepoints = 1;
200
201 int may_stop = 1;
202
203 /* Nonzero if we want to see a trace of target-level operations. */
204
205 static unsigned int targetdebug = 0;
206 static void
207 show_targetdebug (struct ui_file *file, int from_tty,
208 struct cmd_list_element *c, const char *value)
209 {
210 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
211 }
212
213 static void setup_target_debug (void);
214
215 /* The option sets this. */
216 static int stack_cache_enabled_p_1 = 1;
217 /* And set_stack_cache_enabled_p updates this.
218 The reason for the separation is so that we don't flush the cache for
219 on->on transitions. */
220 static int stack_cache_enabled_p = 1;
221
222 /* This is called *after* the stack-cache has been set.
223 Flush the cache for off->on and on->off transitions.
224 There's no real need to flush the cache for on->off transitions,
225 except cleanliness. */
226
227 static void
228 set_stack_cache_enabled_p (char *args, int from_tty,
229 struct cmd_list_element *c)
230 {
231 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
232 target_dcache_invalidate ();
233
234 stack_cache_enabled_p = stack_cache_enabled_p_1;
235 }
236
237 static void
238 show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
239 struct cmd_list_element *c, const char *value)
240 {
241 fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
242 }
243
244 /* Cache of memory operations, to speed up remote access. */
245 static DCACHE *target_dcache;
246
247 /* Invalidate the target dcache. */
248
249 void
250 target_dcache_invalidate (void)
251 {
252 dcache_invalidate (target_dcache);
253 }
254
255 /* The user just typed 'target' without the name of a target. */
256
257 static void
258 target_command (char *arg, int from_tty)
259 {
260 fputs_filtered ("Argument required (target name). Try `help target'\n",
261 gdb_stdout);
262 }
263
264 /* Default target_has_* methods for process_stratum targets. */
265
266 int
267 default_child_has_all_memory (struct target_ops *ops)
268 {
269 /* If no inferior selected, then we can't read memory here. */
270 if (ptid_equal (inferior_ptid, null_ptid))
271 return 0;
272
273 return 1;
274 }
275
276 int
277 default_child_has_memory (struct target_ops *ops)
278 {
279 /* If no inferior selected, then we can't read memory here. */
280 if (ptid_equal (inferior_ptid, null_ptid))
281 return 0;
282
283 return 1;
284 }
285
286 int
287 default_child_has_stack (struct target_ops *ops)
288 {
289 /* If no inferior selected, there's no stack. */
290 if (ptid_equal (inferior_ptid, null_ptid))
291 return 0;
292
293 return 1;
294 }
295
296 int
297 default_child_has_registers (struct target_ops *ops)
298 {
299 /* Can't read registers from no inferior. */
300 if (ptid_equal (inferior_ptid, null_ptid))
301 return 0;
302
303 return 1;
304 }
305
306 int
307 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
308 {
309 /* If there's no thread selected, then we can't make it run through
310 hoops. */
311 if (ptid_equal (the_ptid, null_ptid))
312 return 0;
313
314 return 1;
315 }
316
317
318 int
319 target_has_all_memory_1 (void)
320 {
321 struct target_ops *t;
322
323 for (t = current_target.beneath; t != NULL; t = t->beneath)
324 if (t->to_has_all_memory (t))
325 return 1;
326
327 return 0;
328 }
329
330 int
331 target_has_memory_1 (void)
332 {
333 struct target_ops *t;
334
335 for (t = current_target.beneath; t != NULL; t = t->beneath)
336 if (t->to_has_memory (t))
337 return 1;
338
339 return 0;
340 }
341
342 int
343 target_has_stack_1 (void)
344 {
345 struct target_ops *t;
346
347 for (t = current_target.beneath; t != NULL; t = t->beneath)
348 if (t->to_has_stack (t))
349 return 1;
350
351 return 0;
352 }
353
354 int
355 target_has_registers_1 (void)
356 {
357 struct target_ops *t;
358
359 for (t = current_target.beneath; t != NULL; t = t->beneath)
360 if (t->to_has_registers (t))
361 return 1;
362
363 return 0;
364 }
365
366 int
367 target_has_execution_1 (ptid_t the_ptid)
368 {
369 struct target_ops *t;
370
371 for (t = current_target.beneath; t != NULL; t = t->beneath)
372 if (t->to_has_execution (t, the_ptid))
373 return 1;
374
375 return 0;
376 }
377
378 int
379 target_has_execution_current (void)
380 {
381 return target_has_execution_1 (inferior_ptid);
382 }
383
384 /* Add possible target architecture T to the list and add a new
385 command 'target T->to_shortname'. Set COMPLETER as the command's
386 completer if not NULL. */
387
388 void
389 add_target_with_completer (struct target_ops *t,
390 completer_ftype *completer)
391 {
392 struct cmd_list_element *c;
393
394 /* Provide default values for all "must have" methods. */
395 if (t->to_xfer_partial == NULL)
396 t->to_xfer_partial = default_xfer_partial;
397
398 if (t->to_has_all_memory == NULL)
399 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
400
401 if (t->to_has_memory == NULL)
402 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
403
404 if (t->to_has_stack == NULL)
405 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
406
407 if (t->to_has_registers == NULL)
408 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
409
410 if (t->to_has_execution == NULL)
411 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
412
413 if (!target_structs)
414 {
415 target_struct_allocsize = DEFAULT_ALLOCSIZE;
416 target_structs = (struct target_ops **) xmalloc
417 (target_struct_allocsize * sizeof (*target_structs));
418 }
419 if (target_struct_size >= target_struct_allocsize)
420 {
421 target_struct_allocsize *= 2;
422 target_structs = (struct target_ops **)
423 xrealloc ((char *) target_structs,
424 target_struct_allocsize * sizeof (*target_structs));
425 }
426 target_structs[target_struct_size++] = t;
427
428 if (targetlist == NULL)
429 add_prefix_cmd ("target", class_run, target_command, _("\
430 Connect to a target machine or process.\n\
431 The first argument is the type or protocol of the target machine.\n\
432 Remaining arguments are interpreted by the target protocol. For more\n\
433 information on the arguments for a particular protocol, type\n\
434 `help target ' followed by the protocol name."),
435 &targetlist, "target ", 0, &cmdlist);
436 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
437 &targetlist);
438 if (completer != NULL)
439 set_cmd_completer (c, completer);
440 }
441
442 /* Add a possible target architecture to the list. */
443
444 void
445 add_target (struct target_ops *t)
446 {
447 add_target_with_completer (t, NULL);
448 }
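
/* Illustrative sketch only, not part of the original file: the typical
   way a target backend registers itself via add_target.  The "example"
   target, its open routine and its initialization function below are
   hypothetical names used purely for illustration.  */
#if 0
static struct target_ops example_ops;

static void
example_open (char *args, int from_tty)
{
  /* A real open routine would establish the connection first, then
     install itself on the target stack.  */
  push_target (&example_ops);
}

static void
_initialize_example_target (void)
{
  example_ops.to_shortname = "example";
  example_ops.to_longname = "Hypothetical example target";
  example_ops.to_doc = "Connect to the (hypothetical) example target.";
  example_ops.to_open = example_open;
  example_ops.to_stratum = process_stratum;
  example_ops.to_magic = OPS_MAGIC;

  /* Registers the vector and creates the "target example" command.  */
  add_target (&example_ops);
}
#endif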
449
450 /* See target.h. */
451
452 void
453 add_deprecated_target_alias (struct target_ops *t, char *alias)
454 {
455 struct cmd_list_element *c;
456 char *alt;
457
458 /* If we used add_alias_cmd here, we would not get the deprecated warning;
459 see PR cli/15104. */
460 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
461 alt = xstrprintf ("target %s", t->to_shortname);
462 deprecate_cmd (c, alt);
463 }
464
465 /* Stub functions */
466
467 void
468 target_ignore (void)
469 {
470 }
471
472 void
473 target_kill (void)
474 {
475 struct target_ops *t;
476
477 for (t = current_target.beneath; t != NULL; t = t->beneath)
478 if (t->to_kill != NULL)
479 {
480 if (targetdebug)
481 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
482
483 t->to_kill (t);
484 return;
485 }
486
487 noprocess ();
488 }
489
490 void
491 target_load (char *arg, int from_tty)
492 {
493 target_dcache_invalidate ();
494 (*current_target.to_load) (arg, from_tty);
495 }
496
497 void
498 target_create_inferior (char *exec_file, char *args,
499 char **env, int from_tty)
500 {
501 struct target_ops *t;
502
503 for (t = current_target.beneath; t != NULL; t = t->beneath)
504 {
505 if (t->to_create_inferior != NULL)
506 {
507 t->to_create_inferior (t, exec_file, args, env, from_tty);
508 if (targetdebug)
509 fprintf_unfiltered (gdb_stdlog,
510 "target_create_inferior (%s, %s, xxx, %d)\n",
511 exec_file, args, from_tty);
512 return;
513 }
514 }
515
516 internal_error (__FILE__, __LINE__,
517 _("could not find a target to create inferior"));
518 }
519
520 void
521 target_terminal_inferior (void)
522 {
523 /* A background resume (``run&'') should leave GDB in control of the
524 terminal. Use target_can_async_p, not target_is_async_p, since at
525 this point the target is not async yet. However, if sync_execution
526 is not set, we know it will become async prior to resume. */
527 if (target_can_async_p () && !sync_execution)
528 return;
529
530 /* If GDB is resuming the inferior in the foreground, install
531 inferior's terminal modes. */
532 (*current_target.to_terminal_inferior) ();
533 }
534
535 static int
536 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
537 struct target_ops *t)
538 {
539 errno = EIO; /* Can't read/write this location. */
540 return 0; /* No bytes handled. */
541 }
542
543 static void
544 tcomplain (void)
545 {
546 error (_("You can't do that when your target is `%s'"),
547 current_target.to_shortname);
548 }
549
550 void
551 noprocess (void)
552 {
553 error (_("You can't do that without a process to debug."));
554 }
555
556 static void
557 default_terminal_info (const char *args, int from_tty)
558 {
559 printf_unfiltered (_("No saved terminal information.\n"));
560 }
561
562 /* A default implementation for the to_get_ada_task_ptid target method.
563
564 This function builds the PTID by using both LWP and TID as part of
565 the PTID lwp and tid elements. The pid used is the pid of the
566 inferior_ptid. */
567
568 static ptid_t
569 default_get_ada_task_ptid (long lwp, long tid)
570 {
571 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
572 }
573
574 static enum exec_direction_kind
575 default_execution_direction (void)
576 {
577 if (!target_can_execute_reverse)
578 return EXEC_FORWARD;
579 else if (!target_can_async_p ())
580 return EXEC_FORWARD;
581 else
582 gdb_assert_not_reached ("\
583 to_execution_direction must be implemented for reverse async");
584 }
585
586 /* Go through the target stack from top to bottom, copying over zero
587 entries in current_target, then filling in still empty entries. In
588 effect, we are doing class inheritance through the pushed target
589 vectors.
590
591 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
592 is currently implemented, is that it discards any knowledge of
593 which target an inherited method originally belonged to.
594 Consequently, new target methods should instead explicitly and
595 locally search the target stack for the target that can handle the
596 request. */
597
598 static void
599 update_current_target (void)
600 {
601 struct target_ops *t;
602
603 /* First, reset current's contents. */
604 memset (&current_target, 0, sizeof (current_target));
605
606 #define INHERIT(FIELD, TARGET) \
607 if (!current_target.FIELD) \
608 current_target.FIELD = (TARGET)->FIELD
609
610 for (t = target_stack; t; t = t->beneath)
611 {
612 INHERIT (to_shortname, t);
613 INHERIT (to_longname, t);
614 INHERIT (to_doc, t);
615 /* Do not inherit to_open. */
616 /* Do not inherit to_close. */
617 /* Do not inherit to_attach. */
618 INHERIT (to_post_attach, t);
619 INHERIT (to_attach_no_wait, t);
620 /* Do not inherit to_detach. */
621 /* Do not inherit to_disconnect. */
622 /* Do not inherit to_resume. */
623 /* Do not inherit to_wait. */
624 /* Do not inherit to_fetch_registers. */
625 /* Do not inherit to_store_registers. */
626 INHERIT (to_prepare_to_store, t);
627 INHERIT (deprecated_xfer_memory, t);
628 INHERIT (to_files_info, t);
629 INHERIT (to_insert_breakpoint, t);
630 INHERIT (to_remove_breakpoint, t);
631 INHERIT (to_can_use_hw_breakpoint, t);
632 INHERIT (to_insert_hw_breakpoint, t);
633 INHERIT (to_remove_hw_breakpoint, t);
634 /* Do not inherit to_ranged_break_num_registers. */
635 INHERIT (to_insert_watchpoint, t);
636 INHERIT (to_remove_watchpoint, t);
637 /* Do not inherit to_insert_mask_watchpoint. */
638 /* Do not inherit to_remove_mask_watchpoint. */
639 INHERIT (to_stopped_data_address, t);
640 INHERIT (to_have_steppable_watchpoint, t);
641 INHERIT (to_have_continuable_watchpoint, t);
642 INHERIT (to_stopped_by_watchpoint, t);
643 INHERIT (to_watchpoint_addr_within_range, t);
644 INHERIT (to_region_ok_for_hw_watchpoint, t);
645 INHERIT (to_can_accel_watchpoint_condition, t);
646 /* Do not inherit to_masked_watch_num_registers. */
647 INHERIT (to_terminal_init, t);
648 INHERIT (to_terminal_inferior, t);
649 INHERIT (to_terminal_ours_for_output, t);
650 INHERIT (to_terminal_ours, t);
651 INHERIT (to_terminal_save_ours, t);
652 INHERIT (to_terminal_info, t);
653 /* Do not inherit to_kill. */
654 INHERIT (to_load, t);
655 /* Do not inherit to_create_inferior. */
656 INHERIT (to_post_startup_inferior, t);
657 INHERIT (to_insert_fork_catchpoint, t);
658 INHERIT (to_remove_fork_catchpoint, t);
659 INHERIT (to_insert_vfork_catchpoint, t);
660 INHERIT (to_remove_vfork_catchpoint, t);
661 /* Do not inherit to_follow_fork. */
662 INHERIT (to_insert_exec_catchpoint, t);
663 INHERIT (to_remove_exec_catchpoint, t);
664 INHERIT (to_set_syscall_catchpoint, t);
665 INHERIT (to_has_exited, t);
666 /* Do not inherit to_mourn_inferior. */
667 INHERIT (to_can_run, t);
668 /* Do not inherit to_pass_signals. */
669 /* Do not inherit to_program_signals. */
670 /* Do not inherit to_thread_alive. */
671 /* Do not inherit to_find_new_threads. */
672 /* Do not inherit to_pid_to_str. */
673 INHERIT (to_extra_thread_info, t);
674 INHERIT (to_thread_name, t);
675 INHERIT (to_stop, t);
676 /* Do not inherit to_xfer_partial. */
677 INHERIT (to_rcmd, t);
678 INHERIT (to_pid_to_exec_file, t);
679 INHERIT (to_log_command, t);
680 INHERIT (to_stratum, t);
681 /* Do not inherit to_has_all_memory. */
682 /* Do not inherit to_has_memory. */
683 /* Do not inherit to_has_stack. */
684 /* Do not inherit to_has_registers. */
685 /* Do not inherit to_has_execution. */
686 INHERIT (to_has_thread_control, t);
687 INHERIT (to_can_async_p, t);
688 INHERIT (to_is_async_p, t);
689 INHERIT (to_async, t);
690 INHERIT (to_find_memory_regions, t);
691 INHERIT (to_make_corefile_notes, t);
692 INHERIT (to_get_bookmark, t);
693 INHERIT (to_goto_bookmark, t);
694 /* Do not inherit to_get_thread_local_address. */
695 INHERIT (to_can_execute_reverse, t);
696 INHERIT (to_execution_direction, t);
697 INHERIT (to_thread_architecture, t);
698 /* Do not inherit to_read_description. */
699 INHERIT (to_get_ada_task_ptid, t);
700 /* Do not inherit to_search_memory. */
701 INHERIT (to_supports_multi_process, t);
702 INHERIT (to_supports_enable_disable_tracepoint, t);
703 INHERIT (to_supports_string_tracing, t);
704 INHERIT (to_trace_init, t);
705 INHERIT (to_download_tracepoint, t);
706 INHERIT (to_can_download_tracepoint, t);
707 INHERIT (to_download_trace_state_variable, t);
708 INHERIT (to_enable_tracepoint, t);
709 INHERIT (to_disable_tracepoint, t);
710 INHERIT (to_trace_set_readonly_regions, t);
711 INHERIT (to_trace_start, t);
712 INHERIT (to_get_trace_status, t);
713 INHERIT (to_get_tracepoint_status, t);
714 INHERIT (to_trace_stop, t);
715 INHERIT (to_trace_find, t);
716 INHERIT (to_get_trace_state_variable_value, t);
717 INHERIT (to_save_trace_data, t);
718 INHERIT (to_upload_tracepoints, t);
719 INHERIT (to_upload_trace_state_variables, t);
720 INHERIT (to_get_raw_trace_data, t);
721 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
722 INHERIT (to_set_disconnected_tracing, t);
723 INHERIT (to_set_circular_trace_buffer, t);
724 INHERIT (to_set_trace_buffer_size, t);
725 INHERIT (to_set_trace_notes, t);
726 INHERIT (to_get_tib_address, t);
727 INHERIT (to_set_permissions, t);
728 INHERIT (to_static_tracepoint_marker_at, t);
729 INHERIT (to_static_tracepoint_markers_by_strid, t);
730 INHERIT (to_traceframe_info, t);
731 INHERIT (to_use_agent, t);
732 INHERIT (to_can_use_agent, t);
733 INHERIT (to_augmented_libraries_svr4_read, t);
734 INHERIT (to_magic, t);
735 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
736 INHERIT (to_can_run_breakpoint_commands, t);
737 /* Do not inherit to_memory_map. */
738 /* Do not inherit to_flash_erase. */
739 /* Do not inherit to_flash_done. */
740 }
741 #undef INHERIT
742
743 /* Clean up a target struct so it no longer has any zero pointers in
744 it. Some entries are defaulted to a method that prints an error,
745 others are hard-wired to a standard recursive default. */
746
747 #define de_fault(field, value) \
748 if (!current_target.field) \
749 current_target.field = value
750
751 de_fault (to_open,
752 (void (*) (char *, int))
753 tcomplain);
754 de_fault (to_close,
755 (void (*) (void))
756 target_ignore);
757 de_fault (to_post_attach,
758 (void (*) (int))
759 target_ignore);
760 de_fault (to_prepare_to_store,
761 (void (*) (struct regcache *))
762 noprocess);
763 de_fault (deprecated_xfer_memory,
764 (int (*) (CORE_ADDR, gdb_byte *, int, int,
765 struct mem_attrib *, struct target_ops *))
766 nomemory);
767 de_fault (to_files_info,
768 (void (*) (struct target_ops *))
769 target_ignore);
770 de_fault (to_insert_breakpoint,
771 memory_insert_breakpoint);
772 de_fault (to_remove_breakpoint,
773 memory_remove_breakpoint);
774 de_fault (to_can_use_hw_breakpoint,
775 (int (*) (int, int, int))
776 return_zero);
777 de_fault (to_insert_hw_breakpoint,
778 (int (*) (struct gdbarch *, struct bp_target_info *))
779 return_minus_one);
780 de_fault (to_remove_hw_breakpoint,
781 (int (*) (struct gdbarch *, struct bp_target_info *))
782 return_minus_one);
783 de_fault (to_insert_watchpoint,
784 (int (*) (CORE_ADDR, int, int, struct expression *))
785 return_minus_one);
786 de_fault (to_remove_watchpoint,
787 (int (*) (CORE_ADDR, int, int, struct expression *))
788 return_minus_one);
789 de_fault (to_stopped_by_watchpoint,
790 (int (*) (void))
791 return_zero);
792 de_fault (to_stopped_data_address,
793 (int (*) (struct target_ops *, CORE_ADDR *))
794 return_zero);
795 de_fault (to_watchpoint_addr_within_range,
796 default_watchpoint_addr_within_range);
797 de_fault (to_region_ok_for_hw_watchpoint,
798 default_region_ok_for_hw_watchpoint);
799 de_fault (to_can_accel_watchpoint_condition,
800 (int (*) (CORE_ADDR, int, int, struct expression *))
801 return_zero);
802 de_fault (to_terminal_init,
803 (void (*) (void))
804 target_ignore);
805 de_fault (to_terminal_inferior,
806 (void (*) (void))
807 target_ignore);
808 de_fault (to_terminal_ours_for_output,
809 (void (*) (void))
810 target_ignore);
811 de_fault (to_terminal_ours,
812 (void (*) (void))
813 target_ignore);
814 de_fault (to_terminal_save_ours,
815 (void (*) (void))
816 target_ignore);
817 de_fault (to_terminal_info,
818 default_terminal_info);
819 de_fault (to_load,
820 (void (*) (char *, int))
821 tcomplain);
822 de_fault (to_post_startup_inferior,
823 (void (*) (ptid_t))
824 target_ignore);
825 de_fault (to_insert_fork_catchpoint,
826 (int (*) (int))
827 return_one);
828 de_fault (to_remove_fork_catchpoint,
829 (int (*) (int))
830 return_one);
831 de_fault (to_insert_vfork_catchpoint,
832 (int (*) (int))
833 return_one);
834 de_fault (to_remove_vfork_catchpoint,
835 (int (*) (int))
836 return_one);
837 de_fault (to_insert_exec_catchpoint,
838 (int (*) (int))
839 return_one);
840 de_fault (to_remove_exec_catchpoint,
841 (int (*) (int))
842 return_one);
843 de_fault (to_set_syscall_catchpoint,
844 (int (*) (int, int, int, int, int *))
845 return_one);
846 de_fault (to_has_exited,
847 (int (*) (int, int, int *))
848 return_zero);
849 de_fault (to_can_run,
850 return_zero);
851 de_fault (to_extra_thread_info,
852 (char *(*) (struct thread_info *))
853 return_zero);
854 de_fault (to_thread_name,
855 (char *(*) (struct thread_info *))
856 return_zero);
857 de_fault (to_stop,
858 (void (*) (ptid_t))
859 target_ignore);
860 current_target.to_xfer_partial = current_xfer_partial;
861 de_fault (to_rcmd,
862 (void (*) (char *, struct ui_file *))
863 tcomplain);
864 de_fault (to_pid_to_exec_file,
865 (char *(*) (int))
866 return_zero);
867 de_fault (to_async,
868 (void (*) (void (*) (enum inferior_event_type, void*), void*))
869 tcomplain);
870 de_fault (to_thread_architecture,
871 default_thread_architecture);
872 current_target.to_read_description = NULL;
873 de_fault (to_get_ada_task_ptid,
874 (ptid_t (*) (long, long))
875 default_get_ada_task_ptid);
876 de_fault (to_supports_multi_process,
877 (int (*) (void))
878 return_zero);
879 de_fault (to_supports_enable_disable_tracepoint,
880 (int (*) (void))
881 return_zero);
882 de_fault (to_supports_string_tracing,
883 (int (*) (void))
884 return_zero);
885 de_fault (to_trace_init,
886 (void (*) (void))
887 tcomplain);
888 de_fault (to_download_tracepoint,
889 (void (*) (struct bp_location *))
890 tcomplain);
891 de_fault (to_can_download_tracepoint,
892 (int (*) (void))
893 return_zero);
894 de_fault (to_download_trace_state_variable,
895 (void (*) (struct trace_state_variable *))
896 tcomplain);
897 de_fault (to_enable_tracepoint,
898 (void (*) (struct bp_location *))
899 tcomplain);
900 de_fault (to_disable_tracepoint,
901 (void (*) (struct bp_location *))
902 tcomplain);
903 de_fault (to_trace_set_readonly_regions,
904 (void (*) (void))
905 tcomplain);
906 de_fault (to_trace_start,
907 (void (*) (void))
908 tcomplain);
909 de_fault (to_get_trace_status,
910 (int (*) (struct trace_status *))
911 return_minus_one);
912 de_fault (to_get_tracepoint_status,
913 (void (*) (struct breakpoint *, struct uploaded_tp *))
914 tcomplain);
915 de_fault (to_trace_stop,
916 (void (*) (void))
917 tcomplain);
918 de_fault (to_trace_find,
919 (int (*) (enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
920 return_minus_one);
921 de_fault (to_get_trace_state_variable_value,
922 (int (*) (int, LONGEST *))
923 return_zero);
924 de_fault (to_save_trace_data,
925 (int (*) (const char *))
926 tcomplain);
927 de_fault (to_upload_tracepoints,
928 (int (*) (struct uploaded_tp **))
929 return_zero);
930 de_fault (to_upload_trace_state_variables,
931 (int (*) (struct uploaded_tsv **))
932 return_zero);
933 de_fault (to_get_raw_trace_data,
934 (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
935 tcomplain);
936 de_fault (to_get_min_fast_tracepoint_insn_len,
937 (int (*) (void))
938 return_minus_one);
939 de_fault (to_set_disconnected_tracing,
940 (void (*) (int))
941 target_ignore);
942 de_fault (to_set_circular_trace_buffer,
943 (void (*) (int))
944 target_ignore);
945 de_fault (to_set_trace_buffer_size,
946 (void (*) (LONGEST))
947 target_ignore);
948 de_fault (to_set_trace_notes,
949 (int (*) (const char *, const char *, const char *))
950 return_zero);
951 de_fault (to_get_tib_address,
952 (int (*) (ptid_t, CORE_ADDR *))
953 tcomplain);
954 de_fault (to_set_permissions,
955 (void (*) (void))
956 target_ignore);
957 de_fault (to_static_tracepoint_marker_at,
958 (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
959 return_zero);
960 de_fault (to_static_tracepoint_markers_by_strid,
961 (VEC(static_tracepoint_marker_p) * (*) (const char *))
962 tcomplain);
963 de_fault (to_traceframe_info,
964 (struct traceframe_info * (*) (void))
965 tcomplain);
966 de_fault (to_supports_evaluation_of_breakpoint_conditions,
967 (int (*) (void))
968 return_zero);
969 de_fault (to_can_run_breakpoint_commands,
970 (int (*) (void))
971 return_zero);
972 de_fault (to_use_agent,
973 (int (*) (int))
974 tcomplain);
975 de_fault (to_can_use_agent,
976 (int (*) (void))
977 return_zero);
978 de_fault (to_augmented_libraries_svr4_read,
979 (int (*) (void))
980 return_zero);
981 de_fault (to_execution_direction, default_execution_direction);
982
983 #undef de_fault
984
985 /* Finally, position the target-stack beneath the squashed
986 "current_target". That way code looking for a non-inherited
987 target method can quickly and simply find it. */
988 current_target.beneath = target_stack;
989
990 if (targetdebug)
991 setup_target_debug ();
992 }
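
/* For illustration only (added commentary, not original code): for a
   single field such as to_files_info, the loop above effectively does

       if (!current_target.to_files_info)
         current_target.to_files_info = t->to_files_info;

   for each pushed target T from the top of the stack down, so the
   shallowest target that provides the method wins; any field still
   NULL afterwards is given a default by de_fault, e.g. target_ignore
   or tcomplain.  */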
993
994 /* Push a new target type into the stack of the existing target accessors,
995 possibly superseding some of the existing accessors.
996
997 Rather than allow an empty stack, we always have the dummy target at
998 the bottom stratum, so we can call the function vectors without
999 checking them. */
1000
1001 void
1002 push_target (struct target_ops *t)
1003 {
1004 struct target_ops **cur;
1005
1006 /* Check magic number. If wrong, it probably means someone changed
1007 the struct definition, but not all the places that initialize one. */
1008 if (t->to_magic != OPS_MAGIC)
1009 {
1010 fprintf_unfiltered (gdb_stderr,
1011 "Magic number of %s target struct wrong\n",
1012 t->to_shortname);
1013 internal_error (__FILE__, __LINE__,
1014 _("failed internal consistency check"));
1015 }
1016
1017 /* Find the proper stratum to install this target in. */
1018 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1019 {
1020 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
1021 break;
1022 }
1023
1024 /* If there are already targets at this stratum, remove them. */
1025 /* FIXME: cagney/2003-10-15: I think this should be popping all
1026 targets to CUR, and not just those at this stratum level. */
1027 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
1028 {
1029 /* There's already something at this stratum level. Close it,
1030 and un-hook it from the stack. */
1031 struct target_ops *tmp = (*cur);
1032
1033 (*cur) = (*cur)->beneath;
1034 tmp->beneath = NULL;
1035 target_close (tmp);
1036 }
1037
1038 /* We have removed all targets in our stratum, now add the new one. */
1039 t->beneath = (*cur);
1040 (*cur) = t;
1041
1042 update_current_target ();
1043 }
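
/* For illustration only (added commentary, not original code): with an
   exec target (file_stratum) and a live process target (process_stratum)
   pushed, the stack is consulted top-down as

       process_stratum      <- target_stack
       file_stratum
       dummy_stratum        (always at the bottom)

   push_target inserts the new target above any existing target of a
   strictly lower stratum and replaces an existing target of the same
   stratum.  */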
1044
1045 /* Remove a target_ops vector from the stack, wherever it may be.
1046 Return how many times it was removed (0 or 1). */
1047
1048 int
1049 unpush_target (struct target_ops *t)
1050 {
1051 struct target_ops **cur;
1052 struct target_ops *tmp;
1053
1054 if (t->to_stratum == dummy_stratum)
1055 internal_error (__FILE__, __LINE__,
1056 _("Attempt to unpush the dummy target"));
1057
1058 /* Look for the specified target. Note that we assume that a target
1059 can only occur once in the target stack. */
1060
1061 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1062 {
1063 if ((*cur) == t)
1064 break;
1065 }
1066
1067 /* If we don't find target_ops, quit. Only open targets should be
1068 closed. */
1069 if ((*cur) == NULL)
1070 return 0;
1071
1072 /* Unchain the target. */
1073 tmp = (*cur);
1074 (*cur) = (*cur)->beneath;
1075 tmp->beneath = NULL;
1076
1077 update_current_target ();
1078
1079 /* Finally close the target. Note we do this after unchaining, so
1080 any target method calls from within the target_close
1081 implementation don't end up in T anymore. */
1082 target_close (t);
1083
1084 return 1;
1085 }
1086
1087 void
1088 pop_target (void)
1089 {
1090 target_close (target_stack); /* Let it clean up. */
1091 if (unpush_target (target_stack) == 1)
1092 return;
1093
1094 fprintf_unfiltered (gdb_stderr,
1095 "pop_target couldn't find target %s\n",
1096 current_target.to_shortname);
1097 internal_error (__FILE__, __LINE__,
1098 _("failed internal consistency check"));
1099 }
1100
1101 void
1102 pop_all_targets_above (enum strata above_stratum)
1103 {
1104 while ((int) (current_target.to_stratum) > (int) above_stratum)
1105 {
1106 target_close (target_stack);
1107 if (!unpush_target (target_stack))
1108 {
1109 fprintf_unfiltered (gdb_stderr,
1110 "pop_all_targets couldn't find target %s\n",
1111 target_stack->to_shortname);
1112 internal_error (__FILE__, __LINE__,
1113 _("failed internal consistency check"));
1114 break;
1115 }
1116 }
1117 }
1118
1119 void
1120 pop_all_targets (void)
1121 {
1122 pop_all_targets_above (dummy_stratum);
1123 }
1124
1125 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1126
1127 int
1128 target_is_pushed (struct target_ops *t)
1129 {
1130 struct target_ops **cur;
1131
1132 /* Check magic number. If wrong, it probably means someone changed
1133 the struct definition, but not all the places that initialize one. */
1134 if (t->to_magic != OPS_MAGIC)
1135 {
1136 fprintf_unfiltered (gdb_stderr,
1137 "Magic number of %s target struct wrong\n",
1138 t->to_shortname);
1139 internal_error (__FILE__, __LINE__,
1140 _("failed internal consistency check"));
1141 }
1142
1143 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1144 if (*cur == t)
1145 return 1;
1146
1147 return 0;
1148 }
1149
1150 /* Using the objfile specified in OBJFILE, find the address for the
1151 current thread's thread-local storage with offset OFFSET. */
1152 CORE_ADDR
1153 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1154 {
1155 volatile CORE_ADDR addr = 0;
1156 struct target_ops *target;
1157
1158 for (target = current_target.beneath;
1159 target != NULL;
1160 target = target->beneath)
1161 {
1162 if (target->to_get_thread_local_address != NULL)
1163 break;
1164 }
1165
1166 if (target != NULL
1167 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1168 {
1169 ptid_t ptid = inferior_ptid;
1170 volatile struct gdb_exception ex;
1171
1172 TRY_CATCH (ex, RETURN_MASK_ALL)
1173 {
1174 CORE_ADDR lm_addr;
1175
1176 /* Fetch the load module address for this objfile. */
1177 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1178 objfile);
1179 /* If it's 0, throw the appropriate exception. */
1180 if (lm_addr == 0)
1181 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1182 _("TLS load module not found"));
1183
1184 addr = target->to_get_thread_local_address (target, ptid,
1185 lm_addr, offset);
1186 }
1187 /* If an error occurred, print TLS related messages here. Otherwise,
1188 throw the error to some higher catcher. */
1189 if (ex.reason < 0)
1190 {
1191 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1192
1193 switch (ex.error)
1194 {
1195 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1196 error (_("Cannot find thread-local variables "
1197 "in this thread library."));
1198 break;
1199 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1200 if (objfile_is_library)
1201 error (_("Cannot find shared library `%s' in dynamic"
1202 " linker's load module list"), objfile->name);
1203 else
1204 error (_("Cannot find executable file `%s' in dynamic"
1205 " linker's load module list"), objfile->name);
1206 break;
1207 case TLS_NOT_ALLOCATED_YET_ERROR:
1208 if (objfile_is_library)
1209 error (_("The inferior has not yet allocated storage for"
1210 " thread-local variables in\n"
1211 "the shared library `%s'\n"
1212 "for %s"),
1213 objfile->name, target_pid_to_str (ptid));
1214 else
1215 error (_("The inferior has not yet allocated storage for"
1216 " thread-local variables in\n"
1217 "the executable `%s'\n"
1218 "for %s"),
1219 objfile->name, target_pid_to_str (ptid));
1220 break;
1221 case TLS_GENERIC_ERROR:
1222 if (objfile_is_library)
1223 error (_("Cannot find thread-local storage for %s, "
1224 "shared library %s:\n%s"),
1225 target_pid_to_str (ptid),
1226 objfile->name, ex.message);
1227 else
1228 error (_("Cannot find thread-local storage for %s, "
1229 "executable file %s:\n%s"),
1230 target_pid_to_str (ptid),
1231 objfile->name, ex.message);
1232 break;
1233 default:
1234 throw_exception (ex);
1235 break;
1236 }
1237 }
1238 }
1239 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1240 TLS is an ABI-specific thing. But we don't do that yet. */
1241 else
1242 error (_("Cannot find thread-local variables on this target"));
1243
1244 return addr;
1245 }
1246
1247 #undef MIN
1248 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1249
1250 /* target_read_string -- read a null-terminated string, up to LEN bytes,
1251 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1252 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1253 is responsible for freeing it. Return the number of bytes successfully
1254 read. */
1255
1256 int
1257 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1258 {
1259 int tlen, offset, i;
1260 gdb_byte buf[4];
1261 int errcode = 0;
1262 char *buffer;
1263 int buffer_allocated;
1264 char *bufptr;
1265 unsigned int nbytes_read = 0;
1266
1267 gdb_assert (string);
1268
1269 /* Small for testing. */
1270 buffer_allocated = 4;
1271 buffer = xmalloc (buffer_allocated);
1272 bufptr = buffer;
1273
1274 while (len > 0)
1275 {
1276 tlen = MIN (len, 4 - (memaddr & 3));
1277 offset = memaddr & 3;
1278
1279 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1280 if (errcode != 0)
1281 {
1282 /* The transfer request might have crossed the boundary to an
1283 unallocated region of memory. Retry the transfer, requesting
1284 a single byte. */
1285 tlen = 1;
1286 offset = 0;
1287 errcode = target_read_memory (memaddr, buf, 1);
1288 if (errcode != 0)
1289 goto done;
1290 }
1291
1292 if (bufptr - buffer + tlen > buffer_allocated)
1293 {
1294 unsigned int bytes;
1295
1296 bytes = bufptr - buffer;
1297 buffer_allocated *= 2;
1298 buffer = xrealloc (buffer, buffer_allocated);
1299 bufptr = buffer + bytes;
1300 }
1301
1302 for (i = 0; i < tlen; i++)
1303 {
1304 *bufptr++ = buf[i + offset];
1305 if (buf[i + offset] == '\000')
1306 {
1307 nbytes_read += i + 1;
1308 goto done;
1309 }
1310 }
1311
1312 memaddr += tlen;
1313 len -= tlen;
1314 nbytes_read += tlen;
1315 }
1316 done:
1317 *string = buffer;
1318 if (errnop != NULL)
1319 *errnop = errcode;
1320 return nbytes_read;
1321 }
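
/* Illustrative sketch only, not part of the original file: a
   hypothetical caller of target_read_string.  Note that *STRING is set
   even when an error is reported, so the buffer must be freed either
   way.  */
#if 0
static void
example_print_inferior_string (CORE_ADDR addr)
{
  char *str;
  int err;
  int nbytes = target_read_string (addr, &str, 200, &err);

  if (err == 0 && nbytes > 0)
    printf_filtered ("%s\n", str);
  xfree (str);
}
#endif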
1322
1323 struct target_section_table *
1324 target_get_section_table (struct target_ops *target)
1325 {
1326 struct target_ops *t;
1327
1328 if (targetdebug)
1329 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1330
1331 for (t = target; t != NULL; t = t->beneath)
1332 if (t->to_get_section_table != NULL)
1333 return (*t->to_get_section_table) (t);
1334
1335 return NULL;
1336 }
1337
1338 /* Find a section containing ADDR. */
1339
1340 struct target_section *
1341 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1342 {
1343 struct target_section_table *table = target_get_section_table (target);
1344 struct target_section *secp;
1345
1346 if (table == NULL)
1347 return NULL;
1348
1349 for (secp = table->sections; secp < table->sections_end; secp++)
1350 {
1351 if (addr >= secp->addr && addr < secp->endaddr)
1352 return secp;
1353 }
1354 return NULL;
1355 }
1356
1357 /* Read memory from the live target, even if currently inspecting a
1358 traceframe. The return is the same as that of target_read. */
1359
1360 static LONGEST
1361 target_read_live_memory (enum target_object object,
1362 ULONGEST memaddr, gdb_byte *myaddr, LONGEST len)
1363 {
1364 int ret;
1365 struct cleanup *cleanup;
1366
1367 /* Switch momentarily out of tfind mode so as to access live memory.
1368 Note that this must not clear global state, such as the frame
1369 cache, which must still remain valid for the previous traceframe.
1370 We may be _building_ the frame cache at this point. */
1371 cleanup = make_cleanup_restore_traceframe_number ();
1372 set_traceframe_number (-1);
1373
1374 ret = target_read (current_target.beneath, object, NULL,
1375 myaddr, memaddr, len);
1376
1377 do_cleanups (cleanup);
1378 return ret;
1379 }
1380
1381 /* Using the set of read-only target sections of OPS, read live
1382 read-only memory. Note that the actual reads start from the
1383 top-most target again.
1384
1385 For interface/parameters/return description see target.h,
1386 to_xfer_partial. */
1387
1388 static LONGEST
1389 memory_xfer_live_readonly_partial (struct target_ops *ops,
1390 enum target_object object,
1391 gdb_byte *readbuf, ULONGEST memaddr,
1392 LONGEST len)
1393 {
1394 struct target_section *secp;
1395 struct target_section_table *table;
1396
1397 secp = target_section_by_addr (ops, memaddr);
1398 if (secp != NULL
1399 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1400 secp->the_bfd_section)
1401 & SEC_READONLY))
1402 {
1403 struct target_section *p;
1404 ULONGEST memend = memaddr + len;
1405
1406 table = target_get_section_table (ops);
1407
1408 for (p = table->sections; p < table->sections_end; p++)
1409 {
1410 if (memaddr >= p->addr)
1411 {
1412 if (memend <= p->endaddr)
1413 {
1414 /* Entire transfer is within this section. */
1415 return target_read_live_memory (object, memaddr,
1416 readbuf, len);
1417 }
1418 else if (memaddr >= p->endaddr)
1419 {
1420 /* This section ends before the transfer starts. */
1421 continue;
1422 }
1423 else
1424 {
1425 /* This section overlaps the transfer. Just do half. */
1426 len = p->endaddr - memaddr;
1427 return target_read_live_memory (object, memaddr,
1428 readbuf, len);
1429 }
1430 }
1431 }
1432 }
1433
1434 return 0;
1435 }
1436
1437 /* Perform a partial memory transfer.
1438 For docs see target.h, to_xfer_partial. */
1439
1440 static LONGEST
1441 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1442 void *readbuf, const void *writebuf, ULONGEST memaddr,
1443 LONGEST len)
1444 {
1445 LONGEST res;
1446 int reg_len;
1447 struct mem_region *region;
1448 struct inferior *inf;
1449
1450 /* For accesses to unmapped overlay sections, read directly from
1451 files. Must do this first, as MEMADDR may need adjustment. */
1452 if (readbuf != NULL && overlay_debugging)
1453 {
1454 struct obj_section *section = find_pc_overlay (memaddr);
1455
1456 if (pc_in_unmapped_range (memaddr, section))
1457 {
1458 struct target_section_table *table
1459 = target_get_section_table (ops);
1460 const char *section_name = section->the_bfd_section->name;
1461
1462 memaddr = overlay_mapped_address (memaddr, section);
1463 return section_table_xfer_memory_partial (readbuf, writebuf,
1464 memaddr, len,
1465 table->sections,
1466 table->sections_end,
1467 section_name);
1468 }
1469 }
1470
1471 /* Try the executable files, if "trust-readonly-sections" is set. */
1472 if (readbuf != NULL && trust_readonly)
1473 {
1474 struct target_section *secp;
1475 struct target_section_table *table;
1476
1477 secp = target_section_by_addr (ops, memaddr);
1478 if (secp != NULL
1479 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1480 secp->the_bfd_section)
1481 & SEC_READONLY))
1482 {
1483 table = target_get_section_table (ops);
1484 return section_table_xfer_memory_partial (readbuf, writebuf,
1485 memaddr, len,
1486 table->sections,
1487 table->sections_end,
1488 NULL);
1489 }
1490 }
1491
1492 /* If reading unavailable memory in the context of traceframes, and
1493 this address falls within a read-only section, fall back to
1494 reading from live memory. */
1495 if (readbuf != NULL && get_traceframe_number () != -1)
1496 {
1497 VEC(mem_range_s) *available;
1498
1499 /* If we fail to get the set of available memory, then the
1500 target does not support querying traceframe info, and so we
1501 attempt reading from the traceframe anyway (assuming the
1502 target implements the old QTro packet then). */
1503 if (traceframe_available_memory (&available, memaddr, len))
1504 {
1505 struct cleanup *old_chain;
1506
1507 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1508
1509 if (VEC_empty (mem_range_s, available)
1510 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1511 {
1512 /* Don't read into the traceframe's available
1513 memory. */
1514 if (!VEC_empty (mem_range_s, available))
1515 {
1516 LONGEST oldlen = len;
1517
1518 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1519 gdb_assert (len <= oldlen);
1520 }
1521
1522 do_cleanups (old_chain);
1523
1524 /* This goes through the topmost target again. */
1525 res = memory_xfer_live_readonly_partial (ops, object,
1526 readbuf, memaddr, len);
1527 if (res > 0)
1528 return res;
1529
1530 /* No use trying further, we know some memory starting
1531 at MEMADDR isn't available. */
1532 return -1;
1533 }
1534
1535 /* Don't try to read more than is available, in
1536 case the target implements the deprecated QTro packet to
1537 cater for older GDBs (the target's knowledge of read-only
1538 sections may be outdated by now). */
1539 len = VEC_index (mem_range_s, available, 0)->length;
1540
1541 do_cleanups (old_chain);
1542 }
1543 }
1544
1545 /* Try GDB's internal data cache. */
1546 region = lookup_mem_region (memaddr);
1547 /* region->hi == 0 means there's no upper bound. */
1548 if (memaddr + len < region->hi || region->hi == 0)
1549 reg_len = len;
1550 else
1551 reg_len = region->hi - memaddr;
1552
1553 switch (region->attrib.mode)
1554 {
1555 case MEM_RO:
1556 if (writebuf != NULL)
1557 return -1;
1558 break;
1559
1560 case MEM_WO:
1561 if (readbuf != NULL)
1562 return -1;
1563 break;
1564
1565 case MEM_FLASH:
1566 /* We only support writing to flash during "load" for now. */
1567 if (writebuf != NULL)
1568 error (_("Writing to flash memory forbidden in this context"));
1569 break;
1570
1571 case MEM_NONE:
1572 return -1;
1573 }
1574
1575 if (!ptid_equal (inferior_ptid, null_ptid))
1576 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1577 else
1578 inf = NULL;
1579
1580 if (inf != NULL
1581 /* The dcache reads whole cache lines; that doesn't play well
1582 with reading from a trace buffer, because reading outside of
1583 the collected memory range fails. */
1584 && get_traceframe_number () == -1
1585 && (region->attrib.cache
1586 || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
1587 {
1588 if (readbuf != NULL)
1589 res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
1590 reg_len, 0);
1591 else
1592 /* FIXME drow/2006-08-09: If we're going to preserve const
1593 correctness dcache_xfer_memory should take readbuf and
1594 writebuf. */
1595 res = dcache_xfer_memory (ops, target_dcache, memaddr,
1596 (void *) writebuf,
1597 reg_len, 1);
1598 if (res <= 0)
1599 return -1;
1600 else
1601 return res;
1602 }
1603
1604 /* If none of those methods found the memory we wanted, fall back
1605 to a target partial transfer. Normally a single call to
1606 to_xfer_partial is enough; if it doesn't recognize an object
1607 it will call the to_xfer_partial of the next target down.
1608 But for memory this won't do. Memory is the only target
1609 object which can be read from more than one valid target.
1610 A core file, for instance, could have some of memory but
1611 delegate other bits to the target below it. So, we must
1612 manually try all targets. */
1613
1614 do
1615 {
1616 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1617 readbuf, writebuf, memaddr, reg_len);
1618 if (res > 0)
1619 break;
1620
1621 /* We want to continue past core files to executables, but not
1622 past a running target's memory. */
1623 if (ops->to_has_all_memory (ops))
1624 break;
1625
1626 ops = ops->beneath;
1627 }
1628 while (ops != NULL);
1629
1630 /* If we are writing to the stack, make sure the cache gets updated no
1631 matter what. Even if this write is not tagged as a stack write, we
1632 still need to update the cache. */
1633
1634 if (res > 0
1635 && inf != NULL
1636 && writebuf != NULL
1637 && !region->attrib.cache
1638 && stack_cache_enabled_p
1639 && object != TARGET_OBJECT_STACK_MEMORY)
1640 {
1641 dcache_update (target_dcache, memaddr, (void *) writebuf, res);
1642 }
1643
1644 /* If we still haven't got anything, return the last error. We
1645 give up. */
1646 return res;
1647 }
1648
1649 /* Perform a partial memory transfer. For docs see target.h,
1650 to_xfer_partial. */
1651
1652 static LONGEST
1653 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1654 void *readbuf, const void *writebuf, ULONGEST memaddr,
1655 LONGEST len)
1656 {
1657 int res;
1658
1659 /* Zero length requests are ok and require no work. */
1660 if (len == 0)
1661 return 0;
1662
1663 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1664 breakpoint insns, thus hiding out from higher layers whether
1665 there are software breakpoints inserted in the code stream. */
1666 if (readbuf != NULL)
1667 {
1668 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len);
1669
1670 if (res > 0 && !show_memory_breakpoints)
1671 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1672 }
1673 else
1674 {
1675 void *buf;
1676 struct cleanup *old_chain;
1677
1678 buf = xmalloc (len);
1679 old_chain = make_cleanup (xfree, buf);
1680 memcpy (buf, writebuf, len);
1681
1682 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1683 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len);
1684
1685 do_cleanups (old_chain);
1686 }
1687
1688 return res;
1689 }
1690
1691 static void
1692 restore_show_memory_breakpoints (void *arg)
1693 {
1694 show_memory_breakpoints = (uintptr_t) arg;
1695 }
1696
1697 struct cleanup *
1698 make_show_memory_breakpoints_cleanup (int show)
1699 {
1700 int current = show_memory_breakpoints;
1701
1702 show_memory_breakpoints = show;
1703 return make_cleanup (restore_show_memory_breakpoints,
1704 (void *) (uintptr_t) current);
1705 }
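
/* Illustrative sketch only, not part of the original file: a
   hypothetical caller that wants to read the memory contents GDB
   actually wrote, including any breakpoint instructions it inserted,
   and then restore the previous setting via the cleanup.  */
#if 0
static int
example_read_raw_bytes (CORE_ADDR addr, gdb_byte *buf, ssize_t len)
{
  struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);
  int status = target_read_memory (addr, buf, len);

  do_cleanups (old_chain);
  return status;
}
#endif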
1706
1707 /* For docs see target.h, to_xfer_partial. */
1708
1709 static LONGEST
1710 target_xfer_partial (struct target_ops *ops,
1711 enum target_object object, const char *annex,
1712 void *readbuf, const void *writebuf,
1713 ULONGEST offset, LONGEST len)
1714 {
1715 LONGEST retval;
1716
1717 gdb_assert (ops->to_xfer_partial != NULL);
1718
1719 if (writebuf && !may_write_memory)
1720 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1721 core_addr_to_string_nz (offset), plongest (len));
1722
1723 /* If this is a memory transfer, let the memory-specific code
1724 have a look at it instead. Memory transfers are more
1725 complicated. */
1726 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
1727 retval = memory_xfer_partial (ops, object, readbuf,
1728 writebuf, offset, len);
1729 else
1730 {
1731 enum target_object raw_object = object;
1732
1733 /* If this is a raw memory transfer, request the normal
1734 memory object from other layers. */
1735 if (raw_object == TARGET_OBJECT_RAW_MEMORY)
1736 raw_object = TARGET_OBJECT_MEMORY;
1737
1738 retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
1739 writebuf, offset, len);
1740 }
1741
1742 if (targetdebug)
1743 {
1744 const unsigned char *myaddr = NULL;
1745
1746 fprintf_unfiltered (gdb_stdlog,
1747 "%s:target_xfer_partial "
1748 "(%d, %s, %s, %s, %s, %s) = %s",
1749 ops->to_shortname,
1750 (int) object,
1751 (annex ? annex : "(null)"),
1752 host_address_to_string (readbuf),
1753 host_address_to_string (writebuf),
1754 core_addr_to_string_nz (offset),
1755 plongest (len), plongest (retval));
1756
1757 if (readbuf)
1758 myaddr = readbuf;
1759 if (writebuf)
1760 myaddr = writebuf;
1761 if (retval > 0 && myaddr != NULL)
1762 {
1763 int i;
1764
1765 fputs_unfiltered (", bytes =", gdb_stdlog);
1766 for (i = 0; i < retval; i++)
1767 {
1768 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1769 {
1770 if (targetdebug < 2 && i > 0)
1771 {
1772 fprintf_unfiltered (gdb_stdlog, " ...");
1773 break;
1774 }
1775 fprintf_unfiltered (gdb_stdlog, "\n");
1776 }
1777
1778 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1779 }
1780 }
1781
1782 fputc_unfiltered ('\n', gdb_stdlog);
1783 }
1784 return retval;
1785 }
1786
1787 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1788 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1789 if any error occurs.
1790
1791 If an error occurs, no guarantee is made about the contents of the data at
1792 MYADDR. In particular, the caller should not depend upon partial reads
1793 filling the buffer with good data. There is no way for the caller to know
1794 how much good data might have been transferred anyway. Callers that can
1795 deal with partial reads should call target_read (which will retry until
1796 it makes no progress, and then return how much was transferred). */
1797
1798 int
1799 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1800 {
1801 /* Dispatch to the topmost target, not the flattened current_target.
1802 Memory accesses check target->to_has_(all_)memory, and the
1803 flattened target doesn't inherit those. */
1804 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1805 myaddr, memaddr, len) == len)
1806 return 0;
1807 else
1808 return EIO;
1809 }
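
/* Illustrative sketch only, not part of the original file: as the
   comment above notes, target_read_memory is all-or-nothing, so a
   hypothetical caller that can use a partial result would call
   target_read directly.  */
#if 0
static LONGEST
example_read_as_much_as_possible (CORE_ADDR addr, gdb_byte *buf, LONGEST len)
{
  /* Returns the number of bytes actually transferred, which may be
     less than LEN if part of the range is unreadable.  */
  return target_read (current_target.beneath, TARGET_OBJECT_MEMORY,
		      NULL, buf, addr, len);
}
#endif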
1810
1811 /* Like target_read_memory, but specify explicitly that this is a read from
1812 the target's stack. This may trigger different cache behavior. */
1813
1814 int
1815 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1816 {
1817 /* Dispatch to the topmost target, not the flattened current_target.
1818 Memory accesses check target->to_has_(all_)memory, and the
1819 flattened target doesn't inherit those. */
1820
1821 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1822 myaddr, memaddr, len) == len)
1823 return 0;
1824 else
1825 return EIO;
1826 }
1827
1828 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1829 Returns either 0 for success or an errno value if any error occurs.
1830 If an error occurs, no guarantee is made about how much data got written.
1831 Callers that can deal with partial writes should call target_write. */
1832
1833 int
1834 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1835 {
1836 /* Dispatch to the topmost target, not the flattened current_target.
1837 Memory accesses check target->to_has_(all_)memory, and the
1838 flattened target doesn't inherit those. */
1839 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1840 myaddr, memaddr, len) == len)
1841 return 0;
1842 else
1843 return EIO;
1844 }
1845
1846 /* Write LEN bytes from MYADDR to target raw memory at address
1847 MEMADDR. Returns either 0 for success or an errno value if any
1848 error occurs. If an error occurs, no guarantee is made about how
1849 much data got written. Callers that can deal with partial writes
1850 should call target_write. */
1851
1852 int
1853 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1854 {
1855 /* Dispatch to the topmost target, not the flattened current_target.
1856 Memory accesses check target->to_has_(all_)memory, and the
1857 flattened target doesn't inherit those. */
1858 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1859 myaddr, memaddr, len) == len)
1860 return 0;
1861 else
1862 return EIO;
1863 }
1864
1865 /* Fetch the target's memory map. */
1866
1867 VEC(mem_region_s) *
1868 target_memory_map (void)
1869 {
1870 VEC(mem_region_s) *result;
1871 struct mem_region *last_one, *this_one;
1872 int ix;
1873 struct target_ops *t;
1874
1875 if (targetdebug)
1876 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1877
1878 for (t = current_target.beneath; t != NULL; t = t->beneath)
1879 if (t->to_memory_map != NULL)
1880 break;
1881
1882 if (t == NULL)
1883 return NULL;
1884
1885 result = t->to_memory_map (t);
1886 if (result == NULL)
1887 return NULL;
1888
1889 qsort (VEC_address (mem_region_s, result),
1890 VEC_length (mem_region_s, result),
1891 sizeof (struct mem_region), mem_region_cmp);
1892
1893 /* Check that regions do not overlap. Simultaneously assign
1894 a numbering for the "mem" commands to use to refer to
1895 each region. */
1896 last_one = NULL;
1897 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1898 {
1899 this_one->number = ix;
1900
1901 if (last_one && last_one->hi > this_one->lo)
1902 {
1903 warning (_("Overlapping regions in memory map: ignoring"));
1904 VEC_free (mem_region_s, result);
1905 return NULL;
1906 }
1907 last_one = this_one;
1908 }
1909
1910 return result;
1911 }
1912
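/* Erase LENGTH bytes of the target's flash memory starting at ADDRESS,
   using the first pushed target that supports flash erase; complain if
   none does.  */
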
1913 void
1914 target_flash_erase (ULONGEST address, LONGEST length)
1915 {
1916 struct target_ops *t;
1917
1918 for (t = current_target.beneath; t != NULL; t = t->beneath)
1919 if (t->to_flash_erase != NULL)
1920 {
1921 if (targetdebug)
1922 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1923 hex_string (address), phex (length, 0));
1924 t->to_flash_erase (t, address, length);
1925 return;
1926 }
1927
1928 tcomplain ();
1929 }
1930
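/* Tell the target that the current sequence of flash operations is
   finished, so that it can commit any buffered flash writes; complain
   if no pushed target supports this.  */
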
1931 void
1932 target_flash_done (void)
1933 {
1934 struct target_ops *t;
1935
1936 for (t = current_target.beneath; t != NULL; t = t->beneath)
1937 if (t->to_flash_done != NULL)
1938 {
1939 if (targetdebug)
1940 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1941 t->to_flash_done (t);
1942 return;
1943 }
1944
1945 tcomplain ();
1946 }
1947
1948 static void
1949 show_trust_readonly (struct ui_file *file, int from_tty,
1950 struct cmd_list_element *c, const char *value)
1951 {
1952 fprintf_filtered (file,
1953 _("Mode for reading from readonly sections is %s.\n"),
1954 value);
1955 }
1956
1957 /* More generic transfers. */
1958
1959 static LONGEST
1960 default_xfer_partial (struct target_ops *ops, enum target_object object,
1961 const char *annex, gdb_byte *readbuf,
1962 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1963 {
1964 if (object == TARGET_OBJECT_MEMORY
1965 && ops->deprecated_xfer_memory != NULL)
1966 /* If available, fall back to the target's
1967 "deprecated_xfer_memory" method. */
1968 {
1969 int xfered = -1;
1970
1971 errno = 0;
1972 if (writebuf != NULL)
1973 {
1974 void *buffer = xmalloc (len);
1975 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1976
1977 memcpy (buffer, writebuf, len);
1978 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1979 1/*write*/, NULL, ops);
1980 do_cleanups (cleanup);
1981 }
1982 if (readbuf != NULL)
1983 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1984 0/*read*/, NULL, ops);
1985 if (xfered > 0)
1986 return xfered;
1987 else if (xfered == 0 && errno == 0)
1988 /* "deprecated_xfer_memory" uses 0, cross checked against
1989 ERRNO as one indication of an error. */
1990 return 0;
1991 else
1992 return -1;
1993 }
1994 else if (ops->beneath != NULL)
1995 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1996 readbuf, writebuf, offset, len);
1997 else
1998 return -1;
1999 }
2000
2001 /* The xfer_partial handler for the topmost target. Unlike the default,
2002 it does not need to handle memory specially; it just passes all
2003 requests down the stack. */
2004
2005 static LONGEST
2006 current_xfer_partial (struct target_ops *ops, enum target_object object,
2007 const char *annex, gdb_byte *readbuf,
2008 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
2009 {
2010 if (ops->beneath != NULL)
2011 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2012 readbuf, writebuf, offset, len);
2013 else
2014 return -1;
2015 }
2016
2017 /* Target vector read/write partial wrapper functions. */
2018
2019 static LONGEST
2020 target_read_partial (struct target_ops *ops,
2021 enum target_object object,
2022 const char *annex, gdb_byte *buf,
2023 ULONGEST offset, LONGEST len)
2024 {
2025 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
2026 }
2027
2028 static LONGEST
2029 target_write_partial (struct target_ops *ops,
2030 enum target_object object,
2031 const char *annex, const gdb_byte *buf,
2032 ULONGEST offset, LONGEST len)
2033 {
2034 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
2035 }
2036
2037 /* Wrappers to perform the full transfer. */
2038
2039 /* For docs on target_read see target.h. */
2040
2041 LONGEST
2042 target_read (struct target_ops *ops,
2043 enum target_object object,
2044 const char *annex, gdb_byte *buf,
2045 ULONGEST offset, LONGEST len)
2046 {
2047 LONGEST xfered = 0;
2048
2049 while (xfered < len)
2050 {
2051 LONGEST xfer = target_read_partial (ops, object, annex,
2052 (gdb_byte *) buf + xfered,
2053 offset + xfered, len - xfered);
2054
2055 /* Call an observer, notifying them of the xfer progress? */
2056 if (xfer == 0)
2057 return xfered;
2058 if (xfer < 0)
2059 return -1;
2060 xfered += xfer;
2061 QUIT;
2062 }
2063 return len;
2064 }
2065
2066 /* Assuming that the entire [begin, end) range of memory cannot be
2067 read, try to read whatever subrange is possible to read.
2068
2069 The function returns, in RESULT, either zero or one memory block.
2070 If there's a readable subrange at the beginning, it is completely
2071 read and returned. Any further readable subrange will not be read.
2072 Otherwise, if there's a readable subrange at the end, it will be
2073 completely read and returned. Any readable subranges before it
2074 (obviously, not starting at the beginning) will be ignored. In
2075 other cases -- either no readable subrange, or readable subrange(s)
2076 that are neither at the beginning nor at the end -- nothing is returned.
2077
2078 The purpose of this function is to handle a read across a boundary
2079 of accessible memory in a case when memory map is not available.
2080 The above restrictions are fine for this case, but will give
2081 incorrect results if the memory is 'patchy'. However, supporting
2082 'patchy' memory would require trying to read every single byte,
2083 and that seems an unacceptable solution. An explicit memory map is
2084 recommended for this case -- and read_memory_robust will
2085 take care of reading multiple ranges then. */
2086
2087 static void
2088 read_whatever_is_readable (struct target_ops *ops,
2089 ULONGEST begin, ULONGEST end,
2090 VEC(memory_read_result_s) **result)
2091 {
2092 gdb_byte *buf = xmalloc (end - begin);
2093 ULONGEST current_begin = begin;
2094 ULONGEST current_end = end;
2095 int forward;
2096 memory_read_result_s r;
2097
2098 /* If we previously failed to read 1 byte, nothing can be done here. */
2099 if (end - begin <= 1)
2100 {
2101 xfree (buf);
2102 return;
2103 }
2104
2105 /* Check that either the first or the last byte is readable, and give up
2106 if not. This heuristic is meant to permit reading accessible memory
2107 at the boundary of an accessible region. */
2108 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2109 buf, begin, 1) == 1)
2110 {
2111 forward = 1;
2112 ++current_begin;
2113 }
2114 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2115 buf + (end-begin) - 1, end - 1, 1) == 1)
2116 {
2117 forward = 0;
2118 --current_end;
2119 }
2120 else
2121 {
2122 xfree (buf);
2123 return;
2124 }
2125
2126 /* The loop invariant is that the range [current_begin, current_end) was
2127 previously found not to be readable as a whole.
2128
2129 Note the loop condition -- if the range has only 1 byte, we cannot divide
2130 it further, so there is no point in trying. */
2131 while (current_end - current_begin > 1)
2132 {
2133 ULONGEST first_half_begin, first_half_end;
2134 ULONGEST second_half_begin, second_half_end;
2135 LONGEST xfer;
2136 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2137
2138 if (forward)
2139 {
2140 first_half_begin = current_begin;
2141 first_half_end = middle;
2142 second_half_begin = middle;
2143 second_half_end = current_end;
2144 }
2145 else
2146 {
2147 first_half_begin = middle;
2148 first_half_end = current_end;
2149 second_half_begin = current_begin;
2150 second_half_end = middle;
2151 }
2152
2153 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2154 buf + (first_half_begin - begin),
2155 first_half_begin,
2156 first_half_end - first_half_begin);
2157
2158 if (xfer == first_half_end - first_half_begin)
2159 {
2160 /* This half reads fine. So the error must be in the
2161 other half. */
2162 current_begin = second_half_begin;
2163 current_end = second_half_end;
2164 }
2165 else
2166 {
2167 /* This half is not readable. Because we've tried one byte, we
2168 know some part of this half is actually readable. Go to the next
2169 iteration to divide the range again and try to read it.
2170
2171 We don't handle the other half, because this function only tries
2172 to read a single readable subrange. */
2173 current_begin = first_half_begin;
2174 current_end = first_half_end;
2175 }
2176 }
2177
2178 if (forward)
2179 {
2180 /* The [begin, current_begin) range has been read. */
2181 r.begin = begin;
2182 r.end = current_begin;
2183 r.data = buf;
2184 }
2185 else
2186 {
2187 /* The [current_end, end) range has been read. */
2188 LONGEST rlen = end - current_end;
2189
2190 r.data = xmalloc (rlen);
2191 memcpy (r.data, buf + current_end - begin, rlen);
2192 r.begin = current_end;
2193 r.end = end;
2194 xfree (buf);
2195 }
2196 VEC_safe_push (memory_read_result_s, (*result), &r);
2197 }
2198
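/* Free a VEC of memory_read_result_s, including the data buffers that
   its elements own.  X is the vector; the void * signature makes this
   usable as a cleanup function.  */
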
2199 void
2200 free_memory_read_result_vector (void *x)
2201 {
2202 VEC(memory_read_result_s) *v = x;
2203 memory_read_result_s *current;
2204 int ix;
2205
2206 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2207 {
2208 xfree (current->data);
2209 }
2210 VEC_free (memory_read_result_s, v);
2211 }
2212
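/* Read LEN bytes of target memory starting at OFFSET using OPS,
   tolerating unreadable holes.  Return a vector of the blocks that
   could actually be read; the caller owns the result and should free
   it with free_memory_read_result_vector, typically via a cleanup.

   A minimal usage sketch (ADDR and LEN stand for caller-provided
   values; everything else is defined above or in "target.h"):

     VEC(memory_read_result_s) *blocks
       = read_memory_robust (current_target.beneath, addr, len);
     struct cleanup *old
       = make_cleanup (free_memory_read_result_vector, blocks);
     ... walk the blocks with VEC_iterate (memory_read_result_s, ...) ...
     do_cleanups (old);  */
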
2213 VEC(memory_read_result_s) *
2214 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2215 {
2216 VEC(memory_read_result_s) *result = 0;
2217
2218 LONGEST xfered = 0;
2219 while (xfered < len)
2220 {
2221 struct mem_region *region = lookup_mem_region (offset + xfered);
2222 LONGEST rlen;
2223
2224 /* If there is no explicit region, a fake one should be created. */
2225 gdb_assert (region);
2226
2227 if (region->hi == 0)
2228 rlen = len - xfered;
2229 else
2230 rlen = region->hi - offset;
2231
2232 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2233 {
2234 /* Cannot read this region. Note that we can end up here only
2235 if the region is explicitly marked inaccessible, or
2236 'inaccessible-by-default' is in effect. */
2237 xfered += rlen;
2238 }
2239 else
2240 {
2241 LONGEST to_read = min (len - xfered, rlen);
2242 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2243
2244 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2245 (gdb_byte *) buffer,
2246 offset + xfered, to_read);
2247 /* Call an observer, notifying them of the xfer progress? */
2248 if (xfer <= 0)
2249 {
2250 /* Got an error reading full chunk. See if maybe we can read
2251 some subrange. */
2252 xfree (buffer);
2253 read_whatever_is_readable (ops, offset + xfered,
2254 offset + xfered + to_read, &result);
2255 xfered += to_read;
2256 }
2257 else
2258 {
2259 struct memory_read_result r;
2260 r.data = buffer;
2261 r.begin = offset + xfered;
2262 r.end = r.begin + xfer;
2263 VEC_safe_push (memory_read_result_s, result, &r);
2264 xfered += xfer;
2265 }
2266 QUIT;
2267 }
2268 }
2269 return result;
2270 }
2271
2272
2273 /* An alternative to target_write with progress callbacks. */
2274
2275 LONGEST
2276 target_write_with_progress (struct target_ops *ops,
2277 enum target_object object,
2278 const char *annex, const gdb_byte *buf,
2279 ULONGEST offset, LONGEST len,
2280 void (*progress) (ULONGEST, void *), void *baton)
2281 {
2282 LONGEST xfered = 0;
2283
2284 /* Give the progress callback a chance to set up. */
2285 if (progress)
2286 (*progress) (0, baton);
2287
2288 while (xfered < len)
2289 {
2290 LONGEST xfer = target_write_partial (ops, object, annex,
2291 (gdb_byte *) buf + xfered,
2292 offset + xfered, len - xfered);
2293
2294 if (xfer == 0)
2295 return xfered;
2296 if (xfer < 0)
2297 return -1;
2298
2299 if (progress)
2300 (*progress) (xfer, baton);
2301
2302 xfered += xfer;
2303 QUIT;
2304 }
2305 return len;
2306 }
2307
2308 /* For docs on target_write see target.h. */
2309
2310 LONGEST
2311 target_write (struct target_ops *ops,
2312 enum target_object object,
2313 const char *annex, const gdb_byte *buf,
2314 ULONGEST offset, LONGEST len)
2315 {
2316 return target_write_with_progress (ops, object, annex, buf, offset, len,
2317 NULL, NULL);
2318 }
2319
2320 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2321 the size of the transferred data. PADDING additional bytes are
2322 available in *BUF_P. This is a helper function for
2323 target_read_alloc; see the declaration of that function for more
2324 information. */
2325
2326 static LONGEST
2327 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2328 const char *annex, gdb_byte **buf_p, int padding)
2329 {
2330 size_t buf_alloc, buf_pos;
2331 gdb_byte *buf;
2332 LONGEST n;
2333
2334 /* This function does not have a length parameter; it reads the
2335 entire OBJECT. Also, it doesn't support objects fetched partly
2336 from one target and partly from another (in a different stratum,
2337 e.g. a core file and an executable). Both reasons make it
2338 unsuitable for reading memory. */
2339 gdb_assert (object != TARGET_OBJECT_MEMORY);
2340
2341 /* Start by reading up to 4K at a time. The target will throttle
2342 this number down if necessary. */
2343 buf_alloc = 4096;
2344 buf = xmalloc (buf_alloc);
2345 buf_pos = 0;
2346 while (1)
2347 {
2348 n = target_read_partial (ops, object, annex, &buf[buf_pos],
2349 buf_pos, buf_alloc - buf_pos - padding);
2350 if (n < 0)
2351 {
2352 /* An error occurred. */
2353 xfree (buf);
2354 return -1;
2355 }
2356 else if (n == 0)
2357 {
2358 /* Read all there was. */
2359 if (buf_pos == 0)
2360 xfree (buf);
2361 else
2362 *buf_p = buf;
2363 return buf_pos;
2364 }
2365
2366 buf_pos += n;
2367
2368 /* If the buffer is filling up, expand it. */
2369 if (buf_alloc < buf_pos * 2)
2370 {
2371 buf_alloc *= 2;
2372 buf = xrealloc (buf, buf_alloc);
2373 }
2374
2375 QUIT;
2376 }
2377 }
2378
2379 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2380 the size of the transferred data. See the declaration of this
2381 function in "target.h" for more information about the return value. */
2382
2383 LONGEST
2384 target_read_alloc (struct target_ops *ops, enum target_object object,
2385 const char *annex, gdb_byte **buf_p)
2386 {
2387 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2388 }
2389
2390 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2391 returned as a string, allocated using xmalloc. If an error occurs
2392 or the transfer is unsupported, NULL is returned. Empty objects
2393 are returned as allocated but empty strings. A warning is issued
2394 if the result contains any embedded NUL bytes. */
2395
2396 char *
2397 target_read_stralloc (struct target_ops *ops, enum target_object object,
2398 const char *annex)
2399 {
2400 gdb_byte *buffer;
2401 char *bufstr;
2402 LONGEST i, transferred;
2403
2404 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2405 bufstr = (char *) buffer;
2406
2407 if (transferred < 0)
2408 return NULL;
2409
2410 if (transferred == 0)
2411 return xstrdup ("");
2412
2413 bufstr[transferred] = 0;
2414
2415 /* Check for embedded NUL bytes; but allow trailing NULs. */
2416 for (i = strlen (bufstr); i < transferred; i++)
2417 if (bufstr[i] != 0)
2418 {
2419 warning (_("target object %d, annex %s, "
2420 "contained unexpected null characters"),
2421 (int) object, annex ? annex : "(none)");
2422 break;
2423 }
2424
2425 return bufstr;
2426 }
2427
2428 /* Memory transfer methods. */
2429
2430 void
2431 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2432 LONGEST len)
2433 {
2434 /* This method is used to read from an alternate, non-current
2435 target. This read must bypass the overlay support (as symbols
2436 don't match this target), and GDB's internal cache (wrong cache
2437 for this target). */
2438 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2439 != len)
2440 memory_error (EIO, addr);
2441 }
2442
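/* Like get_target_memory, but read a LEN-byte unsigned integer from
   ADDR in the given BYTE_ORDER.  LEN must not exceed
   sizeof (ULONGEST).  */
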
2443 ULONGEST
2444 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2445 int len, enum bfd_endian byte_order)
2446 {
2447 gdb_byte buf[sizeof (ULONGEST)];
2448
2449 gdb_assert (len <= sizeof (buf));
2450 get_target_memory (ops, addr, buf, len);
2451 return extract_unsigned_integer (buf, len, byte_order);
2452 }
2453
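/* Insert the breakpoint described by BP_TGT using the current target,
   unless breakpoint insertion has been disallowed (see
   may_insert_breakpoints).  Return 0 on success, nonzero otherwise.  */
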
2454 int
2455 target_insert_breakpoint (struct gdbarch *gdbarch,
2456 struct bp_target_info *bp_tgt)
2457 {
2458 if (!may_insert_breakpoints)
2459 {
2460 warning (_("May not insert breakpoints"));
2461 return 1;
2462 }
2463
2464 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2465 }
2466
2467 int
2468 target_remove_breakpoint (struct gdbarch *gdbarch,
2469 struct bp_target_info *bp_tgt)
2470 {
2471 /* This is kind of a weird case to handle, but the permission might
2472 have been changed after breakpoints were inserted - in which case
2473 we should just take the user literally and assume that any
2474 breakpoints should be left in place. */
2475 if (!may_insert_breakpoints)
2476 {
2477 warning (_("May not remove breakpoints"));
2478 return 1;
2479 }
2480
2481 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2482 }
2483
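/* Implement the "info target" and "info files" commands: report where
   symbols come from and which pushed targets provide memory.  */
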
2484 static void
2485 target_info (char *args, int from_tty)
2486 {
2487 struct target_ops *t;
2488 int has_all_mem = 0;
2489
2490 if (symfile_objfile != NULL)
2491 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
2492
2493 for (t = target_stack; t != NULL; t = t->beneath)
2494 {
2495 if (!(*t->to_has_memory) (t))
2496 continue;
2497
2498 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2499 continue;
2500 if (has_all_mem)
2501 printf_unfiltered (_("\tWhile running this, "
2502 "GDB does not access memory from...\n"));
2503 printf_unfiltered ("%s:\n", t->to_longname);
2504 (t->to_files_info) (t);
2505 has_all_mem = (*t->to_has_all_memory) (t);
2506 }
2507 }
2508
2509 /* This function is called before any new inferior is created, e.g.
2510 by running a program, attaching, or connecting to a target.
2511 It cleans up any state from previous invocations which might
2512 change between runs. This is a subset of what target_preopen
2513 resets (things which might change between targets). */
2514
2515 void
2516 target_pre_inferior (int from_tty)
2517 {
2518 /* Clear out solib state. Otherwise the solib state of the previous
2519 inferior might have survived and is entirely wrong for the new
2520 target. This has been observed on GNU/Linux using glibc 2.3. How
2521 to reproduce:
2522
2523 bash$ ./foo&
2524 [1] 4711
2525 bash$ ./foo&
2526 [2] 4712
2527 bash$ gdb ./foo
2528 [...]
2529 (gdb) attach 4711
2530 (gdb) detach
2531 (gdb) attach 4712
2532 Cannot access memory at address 0xdeadbeef
2533 */
2534
2535 /* In some OSs, the shared library list is the same/global/shared
2536 across inferiors. If code is shared between processes, so are
2537 memory regions and features. */
2538 if (!gdbarch_has_global_solist (target_gdbarch ()))
2539 {
2540 no_shared_libraries (NULL, from_tty);
2541
2542 invalidate_target_mem_regions ();
2543
2544 target_clear_description ();
2545 }
2546
2547 agent_capability_invalidate ();
2548 }
2549
2550 /* Callback for iterate_over_inferiors. Gets rid of the given
2551 inferior. */
2552
2553 static int
2554 dispose_inferior (struct inferior *inf, void *args)
2555 {
2556 struct thread_info *thread;
2557
2558 thread = any_thread_of_process (inf->pid);
2559 if (thread)
2560 {
2561 switch_to_thread (thread->ptid);
2562
2563 /* Core inferiors actually should be detached, not killed. */
2564 if (target_has_execution)
2565 target_kill ();
2566 else
2567 target_detach (NULL, 0);
2568 }
2569
2570 return 0;
2571 }
2572
2573 /* This is to be called by the open routine before it does
2574 anything. */
2575
2576 void
2577 target_preopen (int from_tty)
2578 {
2579 dont_repeat ();
2580
2581 if (have_inferiors ())
2582 {
2583 if (!from_tty
2584 || !have_live_inferiors ()
2585 || query (_("A program is being debugged already. Kill it? ")))
2586 iterate_over_inferiors (dispose_inferior, NULL);
2587 else
2588 error (_("Program not killed."));
2589 }
2590
2591 /* Calling target_kill may remove the target from the stack. But if
2592 it doesn't (which seems like a win for UDI), remove it now. */
2593 /* Leave the exec target, though. The user may be switching from a
2594 live process to a core of the same program. */
2595 pop_all_targets_above (file_stratum);
2596
2597 target_pre_inferior (from_tty);
2598 }
2599
2600 /* Detach a target after doing deferred register stores. */
2601
2602 void
2603 target_detach (char *args, int from_tty)
2604 {
2605 struct target_ops* t;
2606
2607 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2608 /* Don't remove global breakpoints here. They're removed on
2609 disconnection from the target. */
2610 ;
2611 else
2612 /* If we're in breakpoints-always-inserted mode, have to remove
2613 them before detaching. */
2614 remove_breakpoints_pid (PIDGET (inferior_ptid));
2615
2616 prepare_for_detach ();
2617
2618 for (t = current_target.beneath; t != NULL; t = t->beneath)
2619 {
2620 if (t->to_detach != NULL)
2621 {
2622 t->to_detach (t, args, from_tty);
2623 if (targetdebug)
2624 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2625 args, from_tty);
2626 return;
2627 }
2628 }
2629
2630 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2631 }
2632
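/* Disconnect from the current target without detaching or killing the
   inferior; breakpoints are removed first.  Complain if no pushed
   target supports disconnecting.  */
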
2633 void
2634 target_disconnect (char *args, int from_tty)
2635 {
2636 struct target_ops *t;
2637
2638 /* If we're in breakpoints-always-inserted mode or if breakpoints
2639 are global across processes, we have to remove them before
2640 disconnecting. */
2641 remove_breakpoints ();
2642
2643 for (t = current_target.beneath; t != NULL; t = t->beneath)
2644 if (t->to_disconnect != NULL)
2645 {
2646 if (targetdebug)
2647 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2648 args, from_tty);
2649 t->to_disconnect (t, args, from_tty);
2650 return;
2651 }
2652
2653 tcomplain ();
2654 }
2655
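/* Wait for the next event from the inferior(s) matching PTID and store
   its details in *STATUS.  OPTIONS is a mask of TARGET_W* flags
   (e.g. TARGET_WNOHANG).  Delegates to the first pushed target that
   implements to_wait.  */
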
2656 ptid_t
2657 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2658 {
2659 struct target_ops *t;
2660
2661 for (t = current_target.beneath; t != NULL; t = t->beneath)
2662 {
2663 if (t->to_wait != NULL)
2664 {
2665 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2666
2667 if (targetdebug)
2668 {
2669 char *status_string;
2670 char *options_string;
2671
2672 status_string = target_waitstatus_to_string (status);
2673 options_string = target_options_to_string (options);
2674 fprintf_unfiltered (gdb_stdlog,
2675 "target_wait (%d, status, options={%s})"
2676 " = %d, %s\n",
2677 PIDGET (ptid), options_string,
2678 PIDGET (retval), status_string);
2679 xfree (status_string);
2680 xfree (options_string);
2681 }
2682
2683 return retval;
2684 }
2685 }
2686
2687 noprocess ();
2688 }
2689
2690 char *
2691 target_pid_to_str (ptid_t ptid)
2692 {
2693 struct target_ops *t;
2694
2695 for (t = current_target.beneath; t != NULL; t = t->beneath)
2696 {
2697 if (t->to_pid_to_str != NULL)
2698 return (*t->to_pid_to_str) (t, ptid);
2699 }
2700
2701 return normal_pid_to_str (ptid);
2702 }
2703
2704 char *
2705 target_thread_name (struct thread_info *info)
2706 {
2707 struct target_ops *t;
2708
2709 for (t = current_target.beneath; t != NULL; t = t->beneath)
2710 {
2711 if (t->to_thread_name != NULL)
2712 return (*t->to_thread_name) (info);
2713 }
2714
2715 return NULL;
2716 }
2717
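/* Resume the threads matching PTID, single-stepping if STEP is nonzero
   and delivering SIGNAL to the inferior.  Invalidates the target
   dcache and marks the resumed threads as running and executing.  */
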
2718 void
2719 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2720 {
2721 struct target_ops *t;
2722
2723 target_dcache_invalidate ();
2724
2725 for (t = current_target.beneath; t != NULL; t = t->beneath)
2726 {
2727 if (t->to_resume != NULL)
2728 {
2729 t->to_resume (t, ptid, step, signal);
2730 if (targetdebug)
2731 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2732 PIDGET (ptid),
2733 step ? "step" : "continue",
2734 gdb_signal_to_name (signal));
2735
2736 registers_changed_ptid (ptid);
2737 set_executing (ptid, 1);
2738 set_running (ptid, 1);
2739 clear_inline_frame_state (ptid);
2740 return;
2741 }
2742 }
2743
2744 noprocess ();
2745 }
2746
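/* Tell the target which of the NUMSIGS signals may be delivered to the
   inferior without reporting a stop to GDB; PASS_SIGNALS[i] is nonzero
   for each such signal.  */
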
2747 void
2748 target_pass_signals (int numsigs, unsigned char *pass_signals)
2749 {
2750 struct target_ops *t;
2751
2752 for (t = current_target.beneath; t != NULL; t = t->beneath)
2753 {
2754 if (t->to_pass_signals != NULL)
2755 {
2756 if (targetdebug)
2757 {
2758 int i;
2759
2760 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2761 numsigs);
2762
2763 for (i = 0; i < numsigs; i++)
2764 if (pass_signals[i])
2765 fprintf_unfiltered (gdb_stdlog, " %s",
2766 gdb_signal_to_name (i));
2767
2768 fprintf_unfiltered (gdb_stdlog, " })\n");
2769 }
2770
2771 (*t->to_pass_signals) (numsigs, pass_signals);
2772 return;
2773 }
2774 }
2775 }
2776
2777 void
2778 target_program_signals (int numsigs, unsigned char *program_signals)
2779 {
2780 struct target_ops *t;
2781
2782 for (t = current_target.beneath; t != NULL; t = t->beneath)
2783 {
2784 if (t->to_program_signals != NULL)
2785 {
2786 if (targetdebug)
2787 {
2788 int i;
2789
2790 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2791 numsigs);
2792
2793 for (i = 0; i < numsigs; i++)
2794 if (program_signals[i])
2795 fprintf_unfiltered (gdb_stdlog, " %s",
2796 gdb_signal_to_name (i));
2797
2798 fprintf_unfiltered (gdb_stdlog, " })\n");
2799 }
2800
2801 (*t->to_program_signals) (numsigs, program_signals);
2802 return;
2803 }
2804 }
2805 }
2806
2807 /* Look through the list of possible targets for a target that can
2808 follow forks. */
2809
2810 int
2811 target_follow_fork (int follow_child)
2812 {
2813 struct target_ops *t;
2814
2815 for (t = current_target.beneath; t != NULL; t = t->beneath)
2816 {
2817 if (t->to_follow_fork != NULL)
2818 {
2819 int retval = t->to_follow_fork (t, follow_child);
2820
2821 if (targetdebug)
2822 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2823 follow_child, retval);
2824 return retval;
2825 }
2826 }
2827
2828 /* Some target returned a fork event, but did not know how to follow it. */
2829 internal_error (__FILE__, __LINE__,
2830 _("could not find a target to follow fork"));
2831 }
2832
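/* The inferior has exited or been killed; let the first target that
   implements to_mourn_inferior clean up its state, then drop BFD
   handles on object files so they are not kept locked.  */
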
2833 void
2834 target_mourn_inferior (void)
2835 {
2836 struct target_ops *t;
2837
2838 for (t = current_target.beneath; t != NULL; t = t->beneath)
2839 {
2840 if (t->to_mourn_inferior != NULL)
2841 {
2842 t->to_mourn_inferior (t);
2843 if (targetdebug)
2844 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2845
2846 /* We no longer need to keep handles on any of the object files.
2847 Make sure to release them to avoid unnecessarily locking any
2848 of them while we're not actually debugging. */
2849 bfd_cache_close_all ();
2850
2851 return;
2852 }
2853 }
2854
2855 internal_error (__FILE__, __LINE__,
2856 _("could not find a target to follow mourn inferior"));
2857 }
2858
2859 /* Look for a target which can describe architectural features, starting
2860 from TARGET. If we find one, return its description. */
2861
2862 const struct target_desc *
2863 target_read_description (struct target_ops *target)
2864 {
2865 struct target_ops *t;
2866
2867 for (t = target; t != NULL; t = t->beneath)
2868 if (t->to_read_description != NULL)
2869 {
2870 const struct target_desc *tdesc;
2871
2872 tdesc = t->to_read_description (t);
2873 if (tdesc)
2874 return tdesc;
2875 }
2876
2877 return NULL;
2878 }
2879
2880 /* The default implementation of to_search_memory.
2881 This implements a basic search of memory, reading target memory and
2882 performing the search here (as opposed to performing the search on the
2883 target side with, for example, gdbserver). */
2884
2885 int
2886 simple_search_memory (struct target_ops *ops,
2887 CORE_ADDR start_addr, ULONGEST search_space_len,
2888 const gdb_byte *pattern, ULONGEST pattern_len,
2889 CORE_ADDR *found_addrp)
2890 {
2891 /* NOTE: also defined in find.c testcase. */
2892 #define SEARCH_CHUNK_SIZE 16000
2893 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2894 /* Buffer to hold memory contents for searching. */
2895 gdb_byte *search_buf;
2896 unsigned search_buf_size;
2897 struct cleanup *old_cleanups;
2898
2899 search_buf_size = chunk_size + pattern_len - 1;
2900
2901 /* No point in trying to allocate a buffer larger than the search space. */
2902 if (search_space_len < search_buf_size)
2903 search_buf_size = search_space_len;
2904
2905 search_buf = malloc (search_buf_size);
2906 if (search_buf == NULL)
2907 error (_("Unable to allocate memory to perform the search."));
2908 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2909
2910 /* Prime the search buffer. */
2911
2912 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2913 search_buf, start_addr, search_buf_size) != search_buf_size)
2914 {
2915 warning (_("Unable to access %s bytes of target "
2916 "memory at %s, halting search."),
2917 pulongest (search_buf_size), hex_string (start_addr));
2918 do_cleanups (old_cleanups);
2919 return -1;
2920 }
2921
2922 /* Perform the search.
2923
2924 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2925 When we've scanned N bytes we copy the trailing bytes to the start and
2926 read in another N bytes. */
2927
2928 while (search_space_len >= pattern_len)
2929 {
2930 gdb_byte *found_ptr;
2931 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2932
2933 found_ptr = memmem (search_buf, nr_search_bytes,
2934 pattern, pattern_len);
2935
2936 if (found_ptr != NULL)
2937 {
2938 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2939
2940 *found_addrp = found_addr;
2941 do_cleanups (old_cleanups);
2942 return 1;
2943 }
2944
2945 /* Not found in this chunk, skip to next chunk. */
2946
2947 /* Don't let search_space_len wrap here, it's unsigned. */
2948 if (search_space_len >= chunk_size)
2949 search_space_len -= chunk_size;
2950 else
2951 search_space_len = 0;
2952
2953 if (search_space_len >= pattern_len)
2954 {
2955 unsigned keep_len = search_buf_size - chunk_size;
2956 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2957 int nr_to_read;
2958
2959 /* Copy the trailing part of the previous iteration to the front
2960 of the buffer for the next iteration. */
2961 gdb_assert (keep_len == pattern_len - 1);
2962 memcpy (search_buf, search_buf + chunk_size, keep_len);
2963
2964 nr_to_read = min (search_space_len - keep_len, chunk_size);
2965
2966 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2967 search_buf + keep_len, read_addr,
2968 nr_to_read) != nr_to_read)
2969 {
2970 warning (_("Unable to access %s bytes of target "
2971 "memory at %s, halting search."),
2972 plongest (nr_to_read),
2973 hex_string (read_addr));
2974 do_cleanups (old_cleanups);
2975 return -1;
2976 }
2977
2978 start_addr += chunk_size;
2979 }
2980 }
2981
2982 /* Not found. */
2983
2984 do_cleanups (old_cleanups);
2985 return 0;
2986 }
2987
2988 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2989 sequence of bytes in PATTERN with length PATTERN_LEN.
2990
2991 The result is 1 if found, 0 if not found, and -1 if there was an error
2992 requiring halting of the search (e.g. memory read error).
2993 If the pattern is found the address is recorded in FOUND_ADDRP. */
2994
2995 int
2996 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2997 const gdb_byte *pattern, ULONGEST pattern_len,
2998 CORE_ADDR *found_addrp)
2999 {
3000 struct target_ops *t;
3001 int found;
3002
3003 /* We don't use INHERIT to set current_target.to_search_memory,
3004 so we have to scan the target stack and handle targetdebug
3005 ourselves. */
3006
3007 if (targetdebug)
3008 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3009 hex_string (start_addr));
3010
3011 for (t = current_target.beneath; t != NULL; t = t->beneath)
3012 if (t->to_search_memory != NULL)
3013 break;
3014
3015 if (t != NULL)
3016 {
3017 found = t->to_search_memory (t, start_addr, search_space_len,
3018 pattern, pattern_len, found_addrp);
3019 }
3020 else
3021 {
3022 /* If a special version of to_search_memory isn't available, use the
3023 simple version. */
3024 found = simple_search_memory (current_target.beneath,
3025 start_addr, search_space_len,
3026 pattern, pattern_len, found_addrp);
3027 }
3028
3029 if (targetdebug)
3030 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3031
3032 return found;
3033 }
3034
3035 /* Look through the currently pushed targets. If none of them will
3036 be able to restart the currently running process, issue an error
3037 message. */
3038
3039 void
3040 target_require_runnable (void)
3041 {
3042 struct target_ops *t;
3043
3044 for (t = target_stack; t != NULL; t = t->beneath)
3045 {
3046 /* If this target knows how to create a new program, then
3047 assume we will still be able to after killing the current
3048 one. Either killing and mourning will not pop T, or else
3049 find_default_run_target will find it again. */
3050 if (t->to_create_inferior != NULL)
3051 return;
3052
3053 /* Do not worry about thread_stratum targets that cannot
3054 create inferiors. Assume they will be pushed again if
3055 necessary, and continue to the process_stratum. */
3056 if (t->to_stratum == thread_stratum
3057 || t->to_stratum == arch_stratum)
3058 continue;
3059
3060 error (_("The \"%s\" target does not support \"run\". "
3061 "Try \"help target\" or \"continue\"."),
3062 t->to_shortname);
3063 }
3064
3065 /* This function is only called if the target is running. In that
3066 case there should have been a process_stratum target and it
3067 should either know how to create inferiors, or not... */
3068 internal_error (__FILE__, __LINE__, _("No targets found"));
3069 }
3070
3071 /* Look through the list of possible targets for a target that can
3072 execute a run or attach command without any other data. This is
3073 used to locate the default process stratum.
3074
3075 If DO_MESG is not NULL, the result is always valid (error() is
3076 called for errors); else, return NULL on error. */
3077
3078 static struct target_ops *
3079 find_default_run_target (char *do_mesg)
3080 {
3081 struct target_ops **t;
3082 struct target_ops *runable = NULL;
3083 int count;
3084
3085 count = 0;
3086
3087 for (t = target_structs; t < target_structs + target_struct_size;
3088 ++t)
3089 {
3090 if ((*t)->to_can_run && target_can_run (*t))
3091 {
3092 runable = *t;
3093 ++count;
3094 }
3095 }
3096
3097 if (count != 1)
3098 {
3099 if (do_mesg)
3100 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3101 else
3102 return NULL;
3103 }
3104
3105 return runable;
3106 }
3107
3108 void
3109 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3110 {
3111 struct target_ops *t;
3112
3113 t = find_default_run_target ("attach");
3114 (t->to_attach) (t, args, from_tty);
3115 return;
3116 }
3117
3118 void
3119 find_default_create_inferior (struct target_ops *ops,
3120 char *exec_file, char *allargs, char **env,
3121 int from_tty)
3122 {
3123 struct target_ops *t;
3124
3125 t = find_default_run_target ("run");
3126 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3127 return;
3128 }
3129
3130 static int
3131 find_default_can_async_p (void)
3132 {
3133 struct target_ops *t;
3134
3135 /* This may be called before the target is pushed on the stack;
3136 look for the default process stratum. If there's none, gdb isn't
3137 configured with a native debugger, and target remote isn't
3138 connected yet. */
3139 t = find_default_run_target (NULL);
3140 if (t && t->to_can_async_p)
3141 return (t->to_can_async_p) ();
3142 return 0;
3143 }
3144
3145 static int
3146 find_default_is_async_p (void)
3147 {
3148 struct target_ops *t;
3149
3150 /* This may be called before the target is pushed on the stack;
3151 look for the default process stratum. If there's none, gdb isn't
3152 configured with a native debugger, and target remote isn't
3153 connected yet. */
3154 t = find_default_run_target (NULL);
3155 if (t && t->to_is_async_p)
3156 return (t->to_is_async_p) ();
3157 return 0;
3158 }
3159
3160 static int
3161 find_default_supports_non_stop (void)
3162 {
3163 struct target_ops *t;
3164
3165 t = find_default_run_target (NULL);
3166 if (t && t->to_supports_non_stop)
3167 return (t->to_supports_non_stop) ();
3168 return 0;
3169 }
3170
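/* Return nonzero if some pushed target supports non-stop mode.  */
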
3171 int
3172 target_supports_non_stop (void)
3173 {
3174 struct target_ops *t;
3175
3176 for (t = &current_target; t != NULL; t = t->beneath)
3177 if (t->to_supports_non_stop)
3178 return t->to_supports_non_stop ();
3179
3180 return 0;
3181 }
3182
3183 /* Implement the "info proc" command. */
3184
3185 int
3186 target_info_proc (char *args, enum info_proc_what what)
3187 {
3188 struct target_ops *t;
3189
3190 /* If we're already connected to something that can get us OS
3191 related data, use it. Otherwise, try using the native
3192 target. */
3193 if (current_target.to_stratum >= process_stratum)
3194 t = current_target.beneath;
3195 else
3196 t = find_default_run_target (NULL);
3197
3198 for (; t != NULL; t = t->beneath)
3199 {
3200 if (t->to_info_proc != NULL)
3201 {
3202 t->to_info_proc (t, args, what);
3203
3204 if (targetdebug)
3205 fprintf_unfiltered (gdb_stdlog,
3206 "target_info_proc (\"%s\", %d)\n", args, what);
3207
3208 return 1;
3209 }
3210 }
3211
3212 return 0;
3213 }
3214
3215 static int
3216 find_default_supports_disable_randomization (void)
3217 {
3218 struct target_ops *t;
3219
3220 t = find_default_run_target (NULL);
3221 if (t && t->to_supports_disable_randomization)
3222 return (t->to_supports_disable_randomization) ();
3223 return 0;
3224 }
3225
3226 int
3227 target_supports_disable_randomization (void)
3228 {
3229 struct target_ops *t;
3230
3231 for (t = &current_target; t != NULL; t = t->beneath)
3232 if (t->to_supports_disable_randomization)
3233 return t->to_supports_disable_randomization ();
3234
3235 return 0;
3236 }
3237
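/* Fetch operating-system data of the given TYPE (for example
   "processes") from the target, as a string allocated with xmalloc.
   Return NULL if no target can provide it.  */
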
3238 char *
3239 target_get_osdata (const char *type)
3240 {
3241 struct target_ops *t;
3242
3243 /* If we're already connected to something that can get us OS
3244 related data, use it. Otherwise, try using the native
3245 target. */
3246 if (current_target.to_stratum >= process_stratum)
3247 t = current_target.beneath;
3248 else
3249 t = find_default_run_target ("get OS data");
3250
3251 if (!t)
3252 return NULL;
3253
3254 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3255 }
3256
3257 /* Determine the current address space of thread PTID. */
3258
3259 struct address_space *
3260 target_thread_address_space (ptid_t ptid)
3261 {
3262 struct address_space *aspace;
3263 struct inferior *inf;
3264 struct target_ops *t;
3265
3266 for (t = current_target.beneath; t != NULL; t = t->beneath)
3267 {
3268 if (t->to_thread_address_space != NULL)
3269 {
3270 aspace = t->to_thread_address_space (t, ptid);
3271 gdb_assert (aspace);
3272
3273 if (targetdebug)
3274 fprintf_unfiltered (gdb_stdlog,
3275 "target_thread_address_space (%s) = %d\n",
3276 target_pid_to_str (ptid),
3277 address_space_num (aspace));
3278 return aspace;
3279 }
3280 }
3281
3282 /* Fall-back to the "main" address space of the inferior. */
3283 inf = find_inferior_pid (ptid_get_pid (ptid));
3284
3285 if (inf == NULL || inf->aspace == NULL)
3286 internal_error (__FILE__, __LINE__,
3287 _("Can't determine the current "
3288 "address space of thread %s\n"),
3289 target_pid_to_str (ptid));
3290
3291 return inf->aspace;
3292 }
3293
3294
3295 /* Target file operations. */
3296
3297 static struct target_ops *
3298 default_fileio_target (void)
3299 {
3300 /* If we're already connected to something that can perform
3301 file I/O, use it. Otherwise, try using the native target. */
3302 if (current_target.to_stratum >= process_stratum)
3303 return current_target.beneath;
3304 else
3305 return find_default_run_target ("file I/O");
3306 }
3307
3308 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3309 target file descriptor, or -1 if an error occurs (and set
3310 *TARGET_ERRNO). */
3311 int
3312 target_fileio_open (const char *filename, int flags, int mode,
3313 int *target_errno)
3314 {
3315 struct target_ops *t;
3316
3317 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3318 {
3319 if (t->to_fileio_open != NULL)
3320 {
3321 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3322
3323 if (targetdebug)
3324 fprintf_unfiltered (gdb_stdlog,
3325 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3326 filename, flags, mode,
3327 fd, fd != -1 ? 0 : *target_errno);
3328 return fd;
3329 }
3330 }
3331
3332 *target_errno = FILEIO_ENOSYS;
3333 return -1;
3334 }
3335
3336 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3337 Return the number of bytes written, or -1 if an error occurs
3338 (and set *TARGET_ERRNO). */
3339 int
3340 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3341 ULONGEST offset, int *target_errno)
3342 {
3343 struct target_ops *t;
3344
3345 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3346 {
3347 if (t->to_fileio_pwrite != NULL)
3348 {
3349 int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
3350 target_errno);
3351
3352 if (targetdebug)
3353 fprintf_unfiltered (gdb_stdlog,
3354 "target_fileio_pwrite (%d,...,%d,%s) "
3355 "= %d (%d)\n",
3356 fd, len, pulongest (offset),
3357 ret, ret != -1 ? 0 : *target_errno);
3358 return ret;
3359 }
3360 }
3361
3362 *target_errno = FILEIO_ENOSYS;
3363 return -1;
3364 }
3365
3366 /* Read up to LEN bytes from FD on the target into READ_BUF.
3367 Return the number of bytes read, or -1 if an error occurs
3368 (and set *TARGET_ERRNO). */
3369 int
3370 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3371 ULONGEST offset, int *target_errno)
3372 {
3373 struct target_ops *t;
3374
3375 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3376 {
3377 if (t->to_fileio_pread != NULL)
3378 {
3379 int ret = t->to_fileio_pread (fd, read_buf, len, offset,
3380 target_errno);
3381
3382 if (targetdebug)
3383 fprintf_unfiltered (gdb_stdlog,
3384 "target_fileio_pread (%d,...,%d,%s) "
3385 "= %d (%d)\n",
3386 fd, len, pulongest (offset),
3387 ret, ret != -1 ? 0 : *target_errno);
3388 return ret;
3389 }
3390 }
3391
3392 *target_errno = FILEIO_ENOSYS;
3393 return -1;
3394 }
3395
3396 /* Close FD on the target. Return 0, or -1 if an error occurs
3397 (and set *TARGET_ERRNO). */
3398 int
3399 target_fileio_close (int fd, int *target_errno)
3400 {
3401 struct target_ops *t;
3402
3403 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3404 {
3405 if (t->to_fileio_close != NULL)
3406 {
3407 int ret = t->to_fileio_close (fd, target_errno);
3408
3409 if (targetdebug)
3410 fprintf_unfiltered (gdb_stdlog,
3411 "target_fileio_close (%d) = %d (%d)\n",
3412 fd, ret, ret != -1 ? 0 : *target_errno);
3413 return ret;
3414 }
3415 }
3416
3417 *target_errno = FILEIO_ENOSYS;
3418 return -1;
3419 }
3420
3421 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3422 occurs (and set *TARGET_ERRNO). */
3423 int
3424 target_fileio_unlink (const char *filename, int *target_errno)
3425 {
3426 struct target_ops *t;
3427
3428 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3429 {
3430 if (t->to_fileio_unlink != NULL)
3431 {
3432 int ret = t->to_fileio_unlink (filename, target_errno);
3433
3434 if (targetdebug)
3435 fprintf_unfiltered (gdb_stdlog,
3436 "target_fileio_unlink (%s) = %d (%d)\n",
3437 filename, ret, ret != -1 ? 0 : *target_errno);
3438 return ret;
3439 }
3440 }
3441
3442 *target_errno = FILEIO_ENOSYS;
3443 return -1;
3444 }
3445
3446 /* Read value of symbolic link FILENAME on the target. Return a
3447 null-terminated string allocated via xmalloc, or NULL if an error
3448 occurs (and set *TARGET_ERRNO). */
3449 char *
3450 target_fileio_readlink (const char *filename, int *target_errno)
3451 {
3452 struct target_ops *t;
3453
3454 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3455 {
3456 if (t->to_fileio_readlink != NULL)
3457 {
3458 char *ret = t->to_fileio_readlink (filename, target_errno);
3459
3460 if (targetdebug)
3461 fprintf_unfiltered (gdb_stdlog,
3462 "target_fileio_readlink (%s) = %s (%d)\n",
3463 filename, ret? ret : "(nil)",
3464 ret? 0 : *target_errno);
3465 return ret;
3466 }
3467 }
3468
3469 *target_errno = FILEIO_ENOSYS;
3470 return NULL;
3471 }
3472
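/* A cleanup callback that closes the target file descriptor pointed to
   by OPAQUE.  */
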
3473 static void
3474 target_fileio_close_cleanup (void *opaque)
3475 {
3476 int fd = *(int *) opaque;
3477 int target_errno;
3478
3479 target_fileio_close (fd, &target_errno);
3480 }
3481
3482 /* Read target file FILENAME. Store the result in *BUF_P and
3483 return the size of the transferred data. PADDING additional bytes are
3484 available in *BUF_P. This is a helper function for
3485 target_fileio_read_alloc; see the declaration of that function for more
3486 information. */
3487
3488 static LONGEST
3489 target_fileio_read_alloc_1 (const char *filename,
3490 gdb_byte **buf_p, int padding)
3491 {
3492 struct cleanup *close_cleanup;
3493 size_t buf_alloc, buf_pos;
3494 gdb_byte *buf;
3495 LONGEST n;
3496 int fd;
3497 int target_errno;
3498
3499 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3500 if (fd == -1)
3501 return -1;
3502
3503 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3504
3505 /* Start by reading up to 4K at a time. The target will throttle
3506 this number down if necessary. */
3507 buf_alloc = 4096;
3508 buf = xmalloc (buf_alloc);
3509 buf_pos = 0;
3510 while (1)
3511 {
3512 n = target_fileio_pread (fd, &buf[buf_pos],
3513 buf_alloc - buf_pos - padding, buf_pos,
3514 &target_errno);
3515 if (n < 0)
3516 {
3517 /* An error occurred. */
3518 do_cleanups (close_cleanup);
3519 xfree (buf);
3520 return -1;
3521 }
3522 else if (n == 0)
3523 {
3524 /* Read all there was. */
3525 do_cleanups (close_cleanup);
3526 if (buf_pos == 0)
3527 xfree (buf);
3528 else
3529 *buf_p = buf;
3530 return buf_pos;
3531 }
3532
3533 buf_pos += n;
3534
3535 /* If the buffer is filling up, expand it. */
3536 if (buf_alloc < buf_pos * 2)
3537 {
3538 buf_alloc *= 2;
3539 buf = xrealloc (buf, buf_alloc);
3540 }
3541
3542 QUIT;
3543 }
3544 }
3545
3546 /* Read target file FILENAME. Store the result in *BUF_P and return
3547 the size of the transferred data. See the declaration of this
3548 function in "target.h" for more information about the return value. */
3549
3550 LONGEST
3551 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3552 {
3553 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3554 }
3555
3556 /* Read target file FILENAME. The result is NUL-terminated and
3557 returned as a string, allocated using xmalloc. If an error occurs
3558 or the transfer is unsupported, NULL is returned. Empty objects
3559 are returned as allocated but empty strings. A warning is issued
3560 if the result contains any embedded NUL bytes. */
3561
3562 char *
3563 target_fileio_read_stralloc (const char *filename)
3564 {
3565 gdb_byte *buffer;
3566 char *bufstr;
3567 LONGEST i, transferred;
3568
3569 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3570 bufstr = (char *) buffer;
3571
3572 if (transferred < 0)
3573 return NULL;
3574
3575 if (transferred == 0)
3576 return xstrdup ("");
3577
3578 bufstr[transferred] = 0;
3579
3580 /* Check for embedded NUL bytes; but allow trailing NULs. */
3581 for (i = strlen (bufstr); i < transferred; i++)
3582 if (bufstr[i] != 0)
3583 {
3584 warning (_("target file %s "
3585 "contained unexpected null characters"),
3586 filename);
3587 break;
3588 }
3589
3590 return bufstr;
3591 }
3592
3593
3594 static int
3595 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3596 {
3597 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3598 }
3599
3600 static int
3601 default_watchpoint_addr_within_range (struct target_ops *target,
3602 CORE_ADDR addr,
3603 CORE_ADDR start, int length)
3604 {
3605 return addr >= start && addr < start + length;
3606 }
3607
3608 static struct gdbarch *
3609 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3610 {
3611 return target_gdbarch ();
3612 }
3613
3614 static int
3615 return_zero (void)
3616 {
3617 return 0;
3618 }
3619
3620 static int
3621 return_one (void)
3622 {
3623 return 1;
3624 }
3625
3626 static int
3627 return_minus_one (void)
3628 {
3629 return -1;
3630 }
3631
3632 /* Find the next target down the stack from the specified
3633 target. */
3634
3635
3636 struct target_ops *
3637 find_target_beneath (struct target_ops *t)
3638 {
3639 return t->beneath;
3640 }
3641
3642 \f
3643 /* The inferior process has died. Long live the inferior! */
3644
3645 void
3646 generic_mourn_inferior (void)
3647 {
3648 ptid_t ptid;
3649
3650 ptid = inferior_ptid;
3651 inferior_ptid = null_ptid;
3652
3653 /* Mark breakpoints uninserted in case something tries to delete a
3654 breakpoint while we delete the inferior's threads (which would
3655 fail, since the inferior is long gone). */
3656 mark_breakpoints_out ();
3657
3658 if (!ptid_equal (ptid, null_ptid))
3659 {
3660 int pid = ptid_get_pid (ptid);
3661 exit_inferior (pid);
3662 }
3663
3664 /* Note this wipes step-resume breakpoints, so needs to be done
3665 after exit_inferior, which ends up referencing the step-resume
3666 breakpoints through clear_thread_inferior_resources. */
3667 breakpoint_init_inferior (inf_exited);
3668
3669 registers_changed ();
3670
3671 reopen_exec_file ();
3672 reinit_frame_cache ();
3673
3674 if (deprecated_detach_hook)
3675 deprecated_detach_hook ();
3676 }
3677 \f
3678 /* Convert a normal process ID to a string. Returns the string in a
3679 static buffer. */
3680
3681 char *
3682 normal_pid_to_str (ptid_t ptid)
3683 {
3684 static char buf[32];
3685
3686 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3687 return buf;
3688 }
3689
3690 static char *
3691 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3692 {
3693 return normal_pid_to_str (ptid);
3694 }
3695
3696 /* Error-catcher for target_find_memory_regions. */
3697 static int
3698 dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
3699 {
3700 error (_("Command not implemented for this target."));
3701 return 0;
3702 }
3703
3704 /* Error-catcher for target_make_corefile_notes. */
3705 static char *
3706 dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
3707 {
3708 error (_("Command not implemented for this target."));
3709 return NULL;
3710 }
3711
3712 /* Error-catcher for target_get_bookmark. */
3713 static gdb_byte *
3714 dummy_get_bookmark (char *ignore1, int ignore2)
3715 {
3716 tcomplain ();
3717 return NULL;
3718 }
3719
3720 /* Error-catcher for target_goto_bookmark. */
3721 static void
3722 dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
3723 {
3724 tcomplain ();
3725 }
3726
3727 /* Set up the handful of non-empty slots needed by the dummy target
3728 vector. */
3729
3730 static void
3731 init_dummy_target (void)
3732 {
3733 dummy_target.to_shortname = "None";
3734 dummy_target.to_longname = "None";
3735 dummy_target.to_doc = "";
3736 dummy_target.to_attach = find_default_attach;
3737 dummy_target.to_detach =
3738 (void (*)(struct target_ops *, char *, int))target_ignore;
3739 dummy_target.to_create_inferior = find_default_create_inferior;
3740 dummy_target.to_can_async_p = find_default_can_async_p;
3741 dummy_target.to_is_async_p = find_default_is_async_p;
3742 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3743 dummy_target.to_supports_disable_randomization
3744 = find_default_supports_disable_randomization;
3745 dummy_target.to_pid_to_str = dummy_pid_to_str;
3746 dummy_target.to_stratum = dummy_stratum;
3747 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
3748 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
3749 dummy_target.to_get_bookmark = dummy_get_bookmark;
3750 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
3751 dummy_target.to_xfer_partial = default_xfer_partial;
3752 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3753 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3754 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3755 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3756 dummy_target.to_has_execution
3757 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3758 dummy_target.to_stopped_by_watchpoint = return_zero;
3759 dummy_target.to_stopped_data_address =
3760 (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
3761 dummy_target.to_magic = OPS_MAGIC;
3762 }
3763 \f
3764 static void
3765 debug_to_open (char *args, int from_tty)
3766 {
3767 debug_target.to_open (args, from_tty);
3768
3769 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3770 }
3771
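/* Close the target TARG, preferring its to_xclose method (if any) over
   to_close.  */
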
3772 void
3773 target_close (struct target_ops *targ)
3774 {
3775 if (targ->to_xclose != NULL)
3776 targ->to_xclose (targ);
3777 else if (targ->to_close != NULL)
3778 targ->to_close ();
3779
3780 if (targetdebug)
3781 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3782 }
3783
3784 void
3785 target_attach (char *args, int from_tty)
3786 {
3787 struct target_ops *t;
3788
3789 for (t = current_target.beneath; t != NULL; t = t->beneath)
3790 {
3791 if (t->to_attach != NULL)
3792 {
3793 t->to_attach (t, args, from_tty);
3794 if (targetdebug)
3795 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3796 args, from_tty);
3797 return;
3798 }
3799 }
3800
3801 internal_error (__FILE__, __LINE__,
3802 _("could not find a target to attach"));
3803 }
3804
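/* Return nonzero if thread PTID is still alive on the target, zero
   otherwise.  */
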
3805 int
3806 target_thread_alive (ptid_t ptid)
3807 {
3808 struct target_ops *t;
3809
3810 for (t = current_target.beneath; t != NULL; t = t->beneath)
3811 {
3812 if (t->to_thread_alive != NULL)
3813 {
3814 int retval;
3815
3816 retval = t->to_thread_alive (t, ptid);
3817 if (targetdebug)
3818 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3819 PIDGET (ptid), retval);
3820
3821 return retval;
3822 }
3823 }
3824
3825 return 0;
3826 }
3827
3828 void
3829 target_find_new_threads (void)
3830 {
3831 struct target_ops *t;
3832
3833 for (t = current_target.beneath; t != NULL; t = t->beneath)
3834 {
3835 if (t->to_find_new_threads != NULL)
3836 {
3837 t->to_find_new_threads (t);
3838 if (targetdebug)
3839 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3840
3841 return;
3842 }
3843 }
3844 }
3845
3846 void
3847 target_stop (ptid_t ptid)
3848 {
3849 if (!may_stop)
3850 {
3851 warning (_("May not interrupt or stop the target, ignoring attempt"));
3852 return;
3853 }
3854
3855 (*current_target.to_stop) (ptid);
3856 }
3857
3858 static void
3859 debug_to_post_attach (int pid)
3860 {
3861 debug_target.to_post_attach (pid);
3862
3863 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3864 }
3865
3866 /* Return a pretty-printed form of target_waitstatus.
3867 Space for the result is malloc'd, caller must free. */
3868
3869 char *
3870 target_waitstatus_to_string (const struct target_waitstatus *ws)
3871 {
3872 const char *kind_str = "status->kind = ";
3873
3874 switch (ws->kind)
3875 {
3876 case TARGET_WAITKIND_EXITED:
3877 return xstrprintf ("%sexited, status = %d",
3878 kind_str, ws->value.integer);
3879 case TARGET_WAITKIND_STOPPED:
3880 return xstrprintf ("%sstopped, signal = %s",
3881 kind_str, gdb_signal_to_name (ws->value.sig));
3882 case TARGET_WAITKIND_SIGNALLED:
3883 return xstrprintf ("%ssignalled, signal = %s",
3884 kind_str, gdb_signal_to_name (ws->value.sig));
3885 case TARGET_WAITKIND_LOADED:
3886 return xstrprintf ("%sloaded", kind_str);
3887 case TARGET_WAITKIND_FORKED:
3888 return xstrprintf ("%sforked", kind_str);
3889 case TARGET_WAITKIND_VFORKED:
3890 return xstrprintf ("%svforked", kind_str);
3891 case TARGET_WAITKIND_EXECD:
3892 return xstrprintf ("%sexecd", kind_str);
3893 case TARGET_WAITKIND_VFORK_DONE:
3894 return xstrprintf ("%svfork-done", kind_str);
3895 case TARGET_WAITKIND_SYSCALL_ENTRY:
3896 return xstrprintf ("%sentered syscall", kind_str);
3897 case TARGET_WAITKIND_SYSCALL_RETURN:
3898 return xstrprintf ("%sexited syscall", kind_str);
3899 case TARGET_WAITKIND_SPURIOUS:
3900 return xstrprintf ("%sspurious", kind_str);
3901 case TARGET_WAITKIND_IGNORE:
3902 return xstrprintf ("%signore", kind_str);
3903 case TARGET_WAITKIND_NO_HISTORY:
3904 return xstrprintf ("%sno-history", kind_str);
3905 case TARGET_WAITKIND_NO_RESUMED:
3906 return xstrprintf ("%sno-resumed", kind_str);
3907 default:
3908 return xstrprintf ("%sunknown???", kind_str);
3909 }
3910 }
3911
3912 /* Concatenate ELEM to LIST, a comma-separated list, and return the
3913 result. The incoming LIST argument is released. */
3914
3915 static char *
3916 str_comma_list_concat_elem (char *list, const char *elem)
3917 {
3918 if (list == NULL)
3919 return xstrdup (elem);
3920 else
3921 return reconcat (list, list, ", ", elem, (char *) NULL);
3922 }
3923
3924 /* Helper for target_options_to_string. If OPT is present in
3925 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3926 Return the new resulting string. OPT is removed from
3927 TARGET_OPTIONS. */
3928
3929 static char *
3930 do_option (int *target_options, char *ret,
3931 int opt, char *opt_str)
3932 {
3933 if ((*target_options & opt) != 0)
3934 {
3935 ret = str_comma_list_concat_elem (ret, opt_str);
3936 *target_options &= ~opt;
3937 }
3938
3939 return ret;
3940 }
3941
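/* Return a malloc'd, comma separated list naming the TARGET_* option
   bits set in TARGET_OPTIONS; any unrecognized bits are reported as
   "unknown???".  For example, TARGET_WNOHANG alone yields the string
   "TARGET_WNOHANG".  The caller is responsible for freeing the
   result.  */
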
3942 char *
3943 target_options_to_string (int target_options)
3944 {
3945 char *ret = NULL;
3946
3947 #define DO_TARG_OPTION(OPT) \
3948 ret = do_option (&target_options, ret, OPT, #OPT)
3949
3950 DO_TARG_OPTION (TARGET_WNOHANG);
3951
3952 if (target_options != 0)
3953 ret = str_comma_list_concat_elem (ret, "unknown???");
3954
3955 if (ret == NULL)
3956 ret = xstrdup ("");
3957 return ret;
3958 }
3959
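/* Log a register access to gdb_stdlog on behalf of FUNC: print the
   register's name (or its number REGNO if it has no name), its raw
   bytes from REGCACHE, and, if the value fits in a LONGEST, its
   numeric value in both hex and decimal.  */
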
3960 static void
3961 debug_print_register (const char * func,
3962 struct regcache *regcache, int regno)
3963 {
3964 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3965
3966 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3967 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3968 && gdbarch_register_name (gdbarch, regno) != NULL
3969 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3970 fprintf_unfiltered (gdb_stdlog, "(%s)",
3971 gdbarch_register_name (gdbarch, regno));
3972 else
3973 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3974 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3975 {
3976 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3977 int i, size = register_size (gdbarch, regno);
3978 gdb_byte buf[MAX_REGISTER_SIZE];
3979
3980 regcache_raw_collect (regcache, regno, buf);
3981 fprintf_unfiltered (gdb_stdlog, " = ");
3982 for (i = 0; i < size; i++)
3983 {
3984 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3985 }
3986 if (size <= sizeof (LONGEST))
3987 {
3988 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3989
3990 fprintf_unfiltered (gdb_stdlog, " %s %s",
3991 core_addr_to_string_nz (val), plongest (val));
3992 }
3993 }
3994 fprintf_unfiltered (gdb_stdlog, "\n");
3995 }
3996
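/* Fetch register REGNO (or all registers if REGNO is -1) into
   REGCACHE, delegating to the first target on the stack that
   implements to_fetch_registers.  */
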
3997 void
3998 target_fetch_registers (struct regcache *regcache, int regno)
3999 {
4000 struct target_ops *t;
4001
4002 for (t = current_target.beneath; t != NULL; t = t->beneath)
4003 {
4004 if (t->to_fetch_registers != NULL)
4005 {
4006 t->to_fetch_registers (t, regcache, regno);
4007 if (targetdebug)
4008 debug_print_register ("target_fetch_registers", regcache, regno);
4009 return;
4010 }
4011 }
4012 }
4013
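/* Write register REGNO (or all registers if REGNO is -1) from
   REGCACHE back to the target.  Errors out if register writes are
   not permitted, and reports "no process" if no target implements
   to_store_registers.  */
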
4014 void
4015 target_store_registers (struct regcache *regcache, int regno)
4016 {
4017 struct target_ops *t;
4018
4019 if (!may_write_registers)
4020 error (_("Writing to registers is not allowed (regno %d)"), regno);
4021
4022 for (t = current_target.beneath; t != NULL; t = t->beneath)
4023 {
4024 if (t->to_store_registers != NULL)
4025 {
4026 t->to_store_registers (t, regcache, regno);
4027 if (targetdebug)
4028 {
4029 debug_print_register ("target_store_registers", regcache, regno);
4030 }
4031 return;
4032 }
4033 }
4034
4035 noprocess ();
4036 }
4037
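/* Return the number of the physical core that thread PTID was last
   seen running on, or -1 if no target on the stack can say.  */
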
4038 int
4039 target_core_of_thread (ptid_t ptid)
4040 {
4041 struct target_ops *t;
4042
4043 for (t = current_target.beneath; t != NULL; t = t->beneath)
4044 {
4045 if (t->to_core_of_thread != NULL)
4046 {
4047 int retval = t->to_core_of_thread (t, ptid);
4048
4049 if (targetdebug)
4050 fprintf_unfiltered (gdb_stdlog,
4051 "target_core_of_thread (%d) = %d\n",
4052 PIDGET (ptid), retval);
4053 return retval;
4054 }
4055 }
4056
4057 return -1;
4058 }
4059
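/* Compare SIZE bytes of target memory starting at MEMADDR against
   DATA, delegating to the first target that implements
   to_verify_memory; complain if none does.  */
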
4060 int
4061 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4062 {
4063 struct target_ops *t;
4064
4065 for (t = current_target.beneath; t != NULL; t = t->beneath)
4066 {
4067 if (t->to_verify_memory != NULL)
4068 {
4069 int retval = t->to_verify_memory (t, data, memaddr, size);
4070
4071 if (targetdebug)
4072 fprintf_unfiltered (gdb_stdlog,
4073 "target_verify_memory (%s, %s) = %d\n",
4074 paddress (target_gdbarch (), memaddr),
4075 pulongest (size),
4076 retval);
4077 return retval;
4078 }
4079 }
4080
4081 tcomplain ();
4082 }
4083
4084 /* The documentation for this function is in its prototype declaration in
4085 target.h. */
4086
4087 int
4088 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4089 {
4090 struct target_ops *t;
4091
4092 for (t = current_target.beneath; t != NULL; t = t->beneath)
4093 if (t->to_insert_mask_watchpoint != NULL)
4094 {
4095 int ret;
4096
4097 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4098
4099 if (targetdebug)
4100 fprintf_unfiltered (gdb_stdlog, "\
4101 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4102 core_addr_to_string (addr),
4103 core_addr_to_string (mask), rw, ret);
4104
4105 return ret;
4106 }
4107
4108 return 1;
4109 }
4110
4111 /* The documentation for this function is in its prototype declaration in
4112 target.h. */
4113
4114 int
4115 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4116 {
4117 struct target_ops *t;
4118
4119 for (t = current_target.beneath; t != NULL; t = t->beneath)
4120 if (t->to_remove_mask_watchpoint != NULL)
4121 {
4122 int ret;
4123
4124 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4125
4126 if (targetdebug)
4127 fprintf_unfiltered (gdb_stdlog, "\
4128 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4129 core_addr_to_string (addr),
4130 core_addr_to_string (mask), rw, ret);
4131
4132 return ret;
4133 }
4134
4135 return 1;
4136 }
4137
4138 /* The documentation for this function is in its prototype declaration
4139 in target.h. */
4140
4141 int
4142 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4143 {
4144 struct target_ops *t;
4145
4146 for (t = current_target.beneath; t != NULL; t = t->beneath)
4147 if (t->to_masked_watch_num_registers != NULL)
4148 return t->to_masked_watch_num_registers (t, addr, mask);
4149
4150 return -1;
4151 }
4152
4153 /* The documentation for this function is in its prototype declaration
4154 in target.h. */
4155
4156 int
4157 target_ranged_break_num_registers (void)
4158 {
4159 struct target_ops *t;
4160
4161 for (t = current_target.beneath; t != NULL; t = t->beneath)
4162 if (t->to_ranged_break_num_registers != NULL)
4163 return t->to_ranged_break_num_registers (t);
4164
4165 return -1;
4166 }
4167
4168 /* See target.h. */
4169
4170 int
4171 target_supports_btrace (void)
4172 {
4173 struct target_ops *t;
4174
4175 for (t = current_target.beneath; t != NULL; t = t->beneath)
4176 if (t->to_supports_btrace != NULL)
4177 return t->to_supports_btrace ();
4178
4179 return 0;
4180 }
4181
4182 /* See target.h. */
4183
4184 struct btrace_target_info *
4185 target_enable_btrace (ptid_t ptid)
4186 {
4187 struct target_ops *t;
4188
4189 for (t = current_target.beneath; t != NULL; t = t->beneath)
4190 if (t->to_enable_btrace != NULL)
4191 return t->to_enable_btrace (ptid);
4192
4193 tcomplain ();
4194 return NULL;
4195 }
4196
4197 /* See target.h. */
4198
4199 void
4200 target_disable_btrace (struct btrace_target_info *btinfo)
4201 {
4202 struct target_ops *t;
4203
4204 for (t = current_target.beneath; t != NULL; t = t->beneath)
4205 if (t->to_disable_btrace != NULL)
4206 return t->to_disable_btrace (btinfo);
4207
4208 tcomplain ();
4209 }
4210
4211 /* See target.h. */
4212
4213 void
4214 target_teardown_btrace (struct btrace_target_info *btinfo)
4215 {
4216 struct target_ops *t;
4217
4218 for (t = current_target.beneath; t != NULL; t = t->beneath)
4219 if (t->to_teardown_btrace != NULL)
4220 return t->to_teardown_btrace (btinfo);
4221
4222 tcomplain ();
4223 }
4224
4225 /* See target.h. */
4226
4227 VEC (btrace_block_s) *
4228 target_read_btrace (struct btrace_target_info *btinfo,
4229 enum btrace_read_type type)
4230 {
4231 struct target_ops *t;
4232
4233 for (t = current_target.beneath; t != NULL; t = t->beneath)
4234 if (t->to_read_btrace != NULL)
4235 return t->to_read_btrace (btinfo, type);
4236
4237 tcomplain ();
4238 return NULL;
4239 }
4240
4241 /* See target.h. */
4242
4243 void
4244 target_stop_recording (void)
4245 {
4246 struct target_ops *t;
4247
4248 for (t = current_target.beneath; t != NULL; t = t->beneath)
4249 if (t->to_stop_recording != NULL)
4250 {
4251 t->to_stop_recording ();
4252 return;
4253 }
4254
4255 /* This is optional. */
4256 }
4257
4258 /* See target.h. */
4259
4260 void
4261 target_info_record (void)
4262 {
4263 struct target_ops *t;
4264
4265 for (t = current_target.beneath; t != NULL; t = t->beneath)
4266 if (t->to_info_record != NULL)
4267 {
4268 t->to_info_record ();
4269 return;
4270 }
4271
4272 tcomplain ();
4273 }
4274
4275 /* See target.h. */
4276
4277 void
4278 target_save_record (const char *filename)
4279 {
4280 struct target_ops *t;
4281
4282 for (t = current_target.beneath; t != NULL; t = t->beneath)
4283 if (t->to_save_record != NULL)
4284 {
4285 t->to_save_record (filename);
4286 return;
4287 }
4288
4289 tcomplain ();
4290 }
4291
4292 /* See target.h. */
4293
4294 int
4295 target_supports_delete_record (void)
4296 {
4297 struct target_ops *t;
4298
4299 for (t = current_target.beneath; t != NULL; t = t->beneath)
4300 if (t->to_delete_record != NULL)
4301 return 1;
4302
4303 return 0;
4304 }
4305
4306 /* See target.h. */
4307
4308 void
4309 target_delete_record (void)
4310 {
4311 struct target_ops *t;
4312
4313 for (t = current_target.beneath; t != NULL; t = t->beneath)
4314 if (t->to_delete_record != NULL)
4315 {
4316 t->to_delete_record ();
4317 return;
4318 }
4319
4320 tcomplain ();
4321 }
4322
4323 /* See target.h. */
4324
4325 int
4326 target_record_is_replaying (void)
4327 {
4328 struct target_ops *t;
4329
4330 for (t = current_target.beneath; t != NULL; t = t->beneath)
4331 if (t->to_record_is_replaying != NULL)
4332 return t->to_record_is_replaying ();
4333
4334 return 0;
4335 }
4336
4337 /* See target.h. */
4338
4339 void
4340 target_goto_record_begin (void)
4341 {
4342 struct target_ops *t;
4343
4344 for (t = current_target.beneath; t != NULL; t = t->beneath)
4345 if (t->to_goto_record_begin != NULL)
4346 {
4347 t->to_goto_record_begin ();
4348 return;
4349 }
4350
4351 tcomplain ();
4352 }
4353
4354 /* See target.h. */
4355
4356 void
4357 target_goto_record_end (void)
4358 {
4359 struct target_ops *t;
4360
4361 for (t = current_target.beneath; t != NULL; t = t->beneath)
4362 if (t->to_goto_record_end != NULL)
4363 {
4364 t->to_goto_record_end ();
4365 return;
4366 }
4367
4368 tcomplain ();
4369 }
4370
4371 /* See target.h. */
4372
4373 void
4374 target_goto_record (ULONGEST insn)
4375 {
4376 struct target_ops *t;
4377
4378 for (t = current_target.beneath; t != NULL; t = t->beneath)
4379 if (t->to_goto_record != NULL)
4380 {
4381 t->to_goto_record (insn);
4382 return;
4383 }
4384
4385 tcomplain ();
4386 }
4387
4388 /* See target.h. */
4389
4390 void
4391 target_insn_history (int size, int flags)
4392 {
4393 struct target_ops *t;
4394
4395 for (t = current_target.beneath; t != NULL; t = t->beneath)
4396 if (t->to_insn_history != NULL)
4397 {
4398 t->to_insn_history (size, flags);
4399 return;
4400 }
4401
4402 tcomplain ();
4403 }
4404
4405 /* See target.h. */
4406
4407 void
4408 target_insn_history_from (ULONGEST from, int size, int flags)
4409 {
4410 struct target_ops *t;
4411
4412 for (t = current_target.beneath; t != NULL; t = t->beneath)
4413 if (t->to_insn_history_from != NULL)
4414 {
4415 t->to_insn_history_from (from, size, flags);
4416 return;
4417 }
4418
4419 tcomplain ();
4420 }
4421
4422 /* See target.h. */
4423
4424 void
4425 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4426 {
4427 struct target_ops *t;
4428
4429 for (t = current_target.beneath; t != NULL; t = t->beneath)
4430 if (t->to_insn_history_range != NULL)
4431 {
4432 t->to_insn_history_range (begin, end, flags);
4433 return;
4434 }
4435
4436 tcomplain ();
4437 }
4438
4439 /* See target.h. */
4440
4441 void
4442 target_call_history (int size, int flags)
4443 {
4444 struct target_ops *t;
4445
4446 for (t = current_target.beneath; t != NULL; t = t->beneath)
4447 if (t->to_call_history != NULL)
4448 {
4449 t->to_call_history (size, flags);
4450 return;
4451 }
4452
4453 tcomplain ();
4454 }
4455
4456 /* See target.h. */
4457
4458 void
4459 target_call_history_from (ULONGEST begin, int size, int flags)
4460 {
4461 struct target_ops *t;
4462
4463 for (t = current_target.beneath; t != NULL; t = t->beneath)
4464 if (t->to_call_history_from != NULL)
4465 {
4466 t->to_call_history_from (begin, size, flags);
4467 return;
4468 }
4469
4470 tcomplain ();
4471 }
4472
4473 /* See target.h. */
4474
4475 void
4476 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4477 {
4478 struct target_ops *t;
4479
4480 for (t = current_target.beneath; t != NULL; t = t->beneath)
4481 if (t->to_call_history_range != NULL)
4482 {
4483 t->to_call_history_range (begin, end, flags);
4484 return;
4485 }
4486
4487 tcomplain ();
4488 }
4489
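/* The debug_to_* routines below wrap the corresponding methods of
   debug_target (the copy of the real target vector saved by
   setup_target_debug) and log each call and its result to
   gdb_stdlog.  */
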
4490 static void
4491 debug_to_prepare_to_store (struct regcache *regcache)
4492 {
4493 debug_target.to_prepare_to_store (regcache);
4494
4495 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4496 }
4497
4498 static int
4499 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4500 int write, struct mem_attrib *attrib,
4501 struct target_ops *target)
4502 {
4503 int retval;
4504
4505 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4506 attrib, target);
4507
4508 fprintf_unfiltered (gdb_stdlog,
4509 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4510 paddress (target_gdbarch (), memaddr), len,
4511 write ? "write" : "read", retval);
4512
4513 if (retval > 0)
4514 {
4515 int i;
4516
4517 fputs_unfiltered (", bytes =", gdb_stdlog);
4518 for (i = 0; i < retval; i++)
4519 {
4520 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4521 {
4522 if (targetdebug < 2 && i > 0)
4523 {
4524 fprintf_unfiltered (gdb_stdlog, " ...");
4525 break;
4526 }
4527 fprintf_unfiltered (gdb_stdlog, "\n");
4528 }
4529
4530 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4531 }
4532 }
4533
4534 fputc_unfiltered ('\n', gdb_stdlog);
4535
4536 return retval;
4537 }
4538
4539 static void
4540 debug_to_files_info (struct target_ops *target)
4541 {
4542 debug_target.to_files_info (target);
4543
4544 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4545 }
4546
4547 static int
4548 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
4549 struct bp_target_info *bp_tgt)
4550 {
4551 int retval;
4552
4553 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
4554
4555 fprintf_unfiltered (gdb_stdlog,
4556 "target_insert_breakpoint (%s, xxx) = %ld\n",
4557 core_addr_to_string (bp_tgt->placed_address),
4558 (unsigned long) retval);
4559 return retval;
4560 }
4561
4562 static int
4563 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
4564 struct bp_target_info *bp_tgt)
4565 {
4566 int retval;
4567
4568 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
4569
4570 fprintf_unfiltered (gdb_stdlog,
4571 "target_remove_breakpoint (%s, xxx) = %ld\n",
4572 core_addr_to_string (bp_tgt->placed_address),
4573 (unsigned long) retval);
4574 return retval;
4575 }
4576
4577 static int
4578 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
4579 {
4580 int retval;
4581
4582 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
4583
4584 fprintf_unfiltered (gdb_stdlog,
4585 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4586 (unsigned long) type,
4587 (unsigned long) cnt,
4588 (unsigned long) from_tty,
4589 (unsigned long) retval);
4590 return retval;
4591 }
4592
4593 static int
4594 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
4595 {
4596 CORE_ADDR retval;
4597
4598 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
4599
4600 fprintf_unfiltered (gdb_stdlog,
4601 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4602 core_addr_to_string (addr), (unsigned long) len,
4603 core_addr_to_string (retval));
4604 return retval;
4605 }
4606
4607 static int
4608 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
4609 struct expression *cond)
4610 {
4611 int retval;
4612
4613 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
4614 rw, cond);
4615
4616 fprintf_unfiltered (gdb_stdlog,
4617 "target_can_accel_watchpoint_condition "
4618 "(%s, %d, %d, %s) = %ld\n",
4619 core_addr_to_string (addr), len, rw,
4620 host_address_to_string (cond), (unsigned long) retval);
4621 return retval;
4622 }
4623
4624 static int
4625 debug_to_stopped_by_watchpoint (void)
4626 {
4627 int retval;
4628
4629 retval = debug_target.to_stopped_by_watchpoint ();
4630
4631 fprintf_unfiltered (gdb_stdlog,
4632 "target_stopped_by_watchpoint () = %ld\n",
4633 (unsigned long) retval);
4634 return retval;
4635 }
4636
4637 static int
4638 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4639 {
4640 int retval;
4641
4642 retval = debug_target.to_stopped_data_address (target, addr);
4643
4644 fprintf_unfiltered (gdb_stdlog,
4645 "target_stopped_data_address ([%s]) = %ld\n",
4646 core_addr_to_string (*addr),
4647 (unsigned long)retval);
4648 return retval;
4649 }
4650
4651 static int
4652 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4653 CORE_ADDR addr,
4654 CORE_ADDR start, int length)
4655 {
4656 int retval;
4657
4658 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4659 start, length);
4660
4661 fprintf_unfiltered (gdb_stdlog,
4662 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4663 core_addr_to_string (addr), core_addr_to_string (start),
4664 length, retval);
4665 return retval;
4666 }
4667
4668 static int
4669 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
4670 struct bp_target_info *bp_tgt)
4671 {
4672 int retval;
4673
4674 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
4675
4676 fprintf_unfiltered (gdb_stdlog,
4677 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4678 core_addr_to_string (bp_tgt->placed_address),
4679 (unsigned long) retval);
4680 return retval;
4681 }
4682
4683 static int
4684 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
4685 struct bp_target_info *bp_tgt)
4686 {
4687 int retval;
4688
4689 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
4690
4691 fprintf_unfiltered (gdb_stdlog,
4692 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4693 core_addr_to_string (bp_tgt->placed_address),
4694 (unsigned long) retval);
4695 return retval;
4696 }
4697
4698 static int
4699 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
4700 struct expression *cond)
4701 {
4702 int retval;
4703
4704 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
4705
4706 fprintf_unfiltered (gdb_stdlog,
4707 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4708 core_addr_to_string (addr), len, type,
4709 host_address_to_string (cond), (unsigned long) retval);
4710 return retval;
4711 }
4712
4713 static int
4714 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
4715 struct expression *cond)
4716 {
4717 int retval;
4718
4719 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
4720
4721 fprintf_unfiltered (gdb_stdlog,
4722 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4723 core_addr_to_string (addr), len, type,
4724 host_address_to_string (cond), (unsigned long) retval);
4725 return retval;
4726 }
4727
4728 static void
4729 debug_to_terminal_init (void)
4730 {
4731 debug_target.to_terminal_init ();
4732
4733 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4734 }
4735
4736 static void
4737 debug_to_terminal_inferior (void)
4738 {
4739 debug_target.to_terminal_inferior ();
4740
4741 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4742 }
4743
4744 static void
4745 debug_to_terminal_ours_for_output (void)
4746 {
4747 debug_target.to_terminal_ours_for_output ();
4748
4749 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4750 }
4751
4752 static void
4753 debug_to_terminal_ours (void)
4754 {
4755 debug_target.to_terminal_ours ();
4756
4757 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4758 }
4759
4760 static void
4761 debug_to_terminal_save_ours (void)
4762 {
4763 debug_target.to_terminal_save_ours ();
4764
4765 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4766 }
4767
4768 static void
4769 debug_to_terminal_info (const char *arg, int from_tty)
4770 {
4771 debug_target.to_terminal_info (arg, from_tty);
4772
4773 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4774 from_tty);
4775 }
4776
4777 static void
4778 debug_to_load (char *args, int from_tty)
4779 {
4780 debug_target.to_load (args, from_tty);
4781
4782 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4783 }
4784
4785 static void
4786 debug_to_post_startup_inferior (ptid_t ptid)
4787 {
4788 debug_target.to_post_startup_inferior (ptid);
4789
4790 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4791 PIDGET (ptid));
4792 }
4793
4794 static int
4795 debug_to_insert_fork_catchpoint (int pid)
4796 {
4797 int retval;
4798
4799 retval = debug_target.to_insert_fork_catchpoint (pid);
4800
4801 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4802 pid, retval);
4803
4804 return retval;
4805 }
4806
4807 static int
4808 debug_to_remove_fork_catchpoint (int pid)
4809 {
4810 int retval;
4811
4812 retval = debug_target.to_remove_fork_catchpoint (pid);
4813
4814 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4815 pid, retval);
4816
4817 return retval;
4818 }
4819
4820 static int
4821 debug_to_insert_vfork_catchpoint (int pid)
4822 {
4823 int retval;
4824
4825 retval = debug_target.to_insert_vfork_catchpoint (pid);
4826
4827 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4828 pid, retval);
4829
4830 return retval;
4831 }
4832
4833 static int
4834 debug_to_remove_vfork_catchpoint (int pid)
4835 {
4836 int retval;
4837
4838 retval = debug_target.to_remove_vfork_catchpoint (pid);
4839
4840 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4841 pid, retval);
4842
4843 return retval;
4844 }
4845
4846 static int
4847 debug_to_insert_exec_catchpoint (int pid)
4848 {
4849 int retval;
4850
4851 retval = debug_target.to_insert_exec_catchpoint (pid);
4852
4853 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4854 pid, retval);
4855
4856 return retval;
4857 }
4858
4859 static int
4860 debug_to_remove_exec_catchpoint (int pid)
4861 {
4862 int retval;
4863
4864 retval = debug_target.to_remove_exec_catchpoint (pid);
4865
4866 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4867 pid, retval);
4868
4869 return retval;
4870 }
4871
4872 static int
4873 debug_to_has_exited (int pid, int wait_status, int *exit_status)
4874 {
4875 int has_exited;
4876
4877 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
4878
4879 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4880 pid, wait_status, *exit_status, has_exited);
4881
4882 return has_exited;
4883 }
4884
4885 static int
4886 debug_to_can_run (void)
4887 {
4888 int retval;
4889
4890 retval = debug_target.to_can_run ();
4891
4892 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4893
4894 return retval;
4895 }
4896
4897 static struct gdbarch *
4898 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4899 {
4900 struct gdbarch *retval;
4901
4902 retval = debug_target.to_thread_architecture (ops, ptid);
4903
4904 fprintf_unfiltered (gdb_stdlog,
4905 "target_thread_architecture (%s) = %s [%s]\n",
4906 target_pid_to_str (ptid),
4907 host_address_to_string (retval),
4908 gdbarch_bfd_arch_info (retval)->printable_name);
4909 return retval;
4910 }
4911
4912 static void
4913 debug_to_stop (ptid_t ptid)
4914 {
4915 debug_target.to_stop (ptid);
4916
4917 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4918 target_pid_to_str (ptid));
4919 }
4920
4921 static void
4922 debug_to_rcmd (char *command,
4923 struct ui_file *outbuf)
4924 {
4925 debug_target.to_rcmd (command, outbuf);
4926 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4927 }
4928
4929 static char *
4930 debug_to_pid_to_exec_file (int pid)
4931 {
4932 char *exec_file;
4933
4934 exec_file = debug_target.to_pid_to_exec_file (pid);
4935
4936 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4937 pid, exec_file);
4938
4939 return exec_file;
4940 }
4941
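/* Save a copy of the current target vector in debug_target and
   replace many of the current target's methods with the debug_to_*
   logging wrappers above.  */
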
4942 static void
4943 setup_target_debug (void)
4944 {
4945 memcpy (&debug_target, &current_target, sizeof debug_target);
4946
4947 current_target.to_open = debug_to_open;
4948 current_target.to_post_attach = debug_to_post_attach;
4949 current_target.to_prepare_to_store = debug_to_prepare_to_store;
4950 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
4951 current_target.to_files_info = debug_to_files_info;
4952 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
4953 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
4954 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
4955 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
4956 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
4957 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
4958 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
4959 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
4960 current_target.to_stopped_data_address = debug_to_stopped_data_address;
4961 current_target.to_watchpoint_addr_within_range
4962 = debug_to_watchpoint_addr_within_range;
4963 current_target.to_region_ok_for_hw_watchpoint
4964 = debug_to_region_ok_for_hw_watchpoint;
4965 current_target.to_can_accel_watchpoint_condition
4966 = debug_to_can_accel_watchpoint_condition;
4967 current_target.to_terminal_init = debug_to_terminal_init;
4968 current_target.to_terminal_inferior = debug_to_terminal_inferior;
4969 current_target.to_terminal_ours_for_output
4970 = debug_to_terminal_ours_for_output;
4971 current_target.to_terminal_ours = debug_to_terminal_ours;
4972 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
4973 current_target.to_terminal_info = debug_to_terminal_info;
4974 current_target.to_load = debug_to_load;
4975 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
4976 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
4977 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
4978 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
4979 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
4980 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
4981 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
4982 current_target.to_has_exited = debug_to_has_exited;
4983 current_target.to_can_run = debug_to_can_run;
4984 current_target.to_stop = debug_to_stop;
4985 current_target.to_rcmd = debug_to_rcmd;
4986 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
4987 current_target.to_thread_architecture = debug_to_thread_architecture;
4988 }
4989 \f
4990
4991 static char targ_desc[] =
4992 "Names of targets and files being debugged.\nShows the entire \
4993 stack of targets currently in use (including the exec-file,\n\
4994 core-file, and process, if any), as well as the symbol file name.";
4995
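/* Implement the "monitor" command: forward CMD to the target via
   target_rcmd, erroring out if the current target (or the target
   underneath the debug wrapper) does not support rcmd.  */
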
4996 static void
4997 do_monitor_command (char *cmd,
4998 int from_tty)
4999 {
5000 if ((current_target.to_rcmd
5001 == (void (*) (char *, struct ui_file *)) tcomplain)
5002 || (current_target.to_rcmd == debug_to_rcmd
5003 && (debug_target.to_rcmd
5004 == (void (*) (char *, struct ui_file *)) tcomplain)))
5005 error (_("\"monitor\" command not supported by this target."));
5006 target_rcmd (cmd, gdb_stdtarg);
5007 }
5008
5009 /* Print the name of each layer of our target stack. */
5010
5011 static void
5012 maintenance_print_target_stack (char *cmd, int from_tty)
5013 {
5014 struct target_ops *t;
5015
5016 printf_filtered (_("The current target stack is:\n"));
5017
5018 for (t = target_stack; t != NULL; t = t->beneath)
5019 {
5020 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
5021 }
5022 }
5023
5024 /* Controls whether async mode is permitted. */
5025 int target_async_permitted = 0;
5026
5027 /* The set command writes to this variable. If the inferior is
5028 executing, target_async_permitted is *not* updated. */
5029 static int target_async_permitted_1 = 0;
5030
5031 static void
5032 set_target_async_command (char *args, int from_tty,
5033 struct cmd_list_element *c)
5034 {
5035 if (have_live_inferiors ())
5036 {
5037 target_async_permitted_1 = target_async_permitted;
5038 error (_("Cannot change this setting while the inferior is running."));
5039 }
5040
5041 target_async_permitted = target_async_permitted_1;
5042 }
5043
5044 static void
5045 show_target_async_command (struct ui_file *file, int from_tty,
5046 struct cmd_list_element *c,
5047 const char *value)
5048 {
5049 fprintf_filtered (file,
5050 _("Controlling the inferior in "
5051 "asynchronous mode is %s.\n"), value);
5052 }
5053
5054 /* Temporary copies of permission settings. */
5055
5056 static int may_write_registers_1 = 1;
5057 static int may_write_memory_1 = 1;
5058 static int may_insert_breakpoints_1 = 1;
5059 static int may_insert_tracepoints_1 = 1;
5060 static int may_insert_fast_tracepoints_1 = 1;
5061 static int may_stop_1 = 1;
5062
5063 /* Make the user-set values match the real values again. */
5064
5065 void
5066 update_target_permissions (void)
5067 {
5068 may_write_registers_1 = may_write_registers;
5069 may_write_memory_1 = may_write_memory;
5070 may_insert_breakpoints_1 = may_insert_breakpoints;
5071 may_insert_tracepoints_1 = may_insert_tracepoints;
5072 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
5073 may_stop_1 = may_stop;
5074 }
5075
5076 /* This single function handles (most of) the permission flags in the
5077 same way. */
5078
5079 static void
5080 set_target_permissions (char *args, int from_tty,
5081 struct cmd_list_element *c)
5082 {
5083 if (target_has_execution)
5084 {
5085 update_target_permissions ();
5086 error (_("Cannot change this setting while the inferior is running."));
5087 }
5088
5089 /* Make the real values match the user-changed values. */
5090 may_write_registers = may_write_registers_1;
5091 may_insert_breakpoints = may_insert_breakpoints_1;
5092 may_insert_tracepoints = may_insert_tracepoints_1;
5093 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
5094 may_stop = may_stop_1;
5095 update_observer_mode ();
5096 }
5097
5098 /* Set memory write permission independently of observer mode. */
5099
5100 static void
5101 set_write_memory_permission (char *args, int from_tty,
5102 struct cmd_list_element *c)
5103 {
5104 /* Make the real values match the user-changed values. */
5105 may_write_memory = may_write_memory_1;
5106 update_observer_mode ();
5107 }
5108
5109
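/* Module initialization: push the dummy target, register the "info
   target", "info files", and "monitor" commands, the various set/show
   knobs handled above, and create the target dcache.  */
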
5110 void
5111 initialize_targets (void)
5112 {
5113 init_dummy_target ();
5114 push_target (&dummy_target);
5115
5116 add_info ("target", target_info, targ_desc);
5117 add_info ("files", target_info, targ_desc);
5118
5119 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
5120 Set target debugging."), _("\
5121 Show target debugging."), _("\
5122 When non-zero, target debugging is enabled. Higher numbers are more\n\
5123 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5124 command."),
5125 NULL,
5126 show_targetdebug,
5127 &setdebuglist, &showdebuglist);
5128
5129 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
5130 &trust_readonly, _("\
5131 Set mode for reading from readonly sections."), _("\
5132 Show mode for reading from readonly sections."), _("\
5133 When this mode is on, memory reads from readonly sections (such as .text)\n\
5134 will be read from the object file instead of from the target. This will\n\
5135 result in significant performance improvement for remote targets."),
5136 NULL,
5137 show_trust_readonly,
5138 &setlist, &showlist);
5139
5140 add_com ("monitor", class_obscure, do_monitor_command,
5141 _("Send a command to the remote monitor (remote targets only)."));
5142
5143 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
5144 _("Print the name of each layer of the internal target stack."),
5145 &maintenanceprintlist);
5146
5147 add_setshow_boolean_cmd ("target-async", no_class,
5148 &target_async_permitted_1, _("\
5149 Set whether GDB controls the inferior in asynchronous mode."), _("\
5150 Show whether GDB controls the inferior in asynchronous mode."), _("\
5151 Tells GDB whether to control the inferior in asynchronous mode."),
5152 set_target_async_command,
5153 show_target_async_command,
5154 &setlist,
5155 &showlist);
5156
5157 add_setshow_boolean_cmd ("stack-cache", class_support,
5158 &stack_cache_enabled_p_1, _("\
5159 Set cache use for stack access."), _("\
5160 Show cache use for stack access."), _("\
5161 When on, use the data cache for all stack access, regardless of any\n\
5162 configured memory regions. This improves remote performance significantly.\n\
5163 By default, caching for stack access is on."),
5164 set_stack_cache_enabled_p,
5165 show_stack_cache_enabled_p,
5166 &setlist, &showlist);
5167
5168 add_setshow_boolean_cmd ("may-write-registers", class_support,
5169 &may_write_registers_1, _("\
5170 Set permission to write into registers."), _("\
5171 Show permission to write into registers."), _("\
5172 When this permission is on, GDB may write into the target's registers.\n\
5173 Otherwise, any sort of write attempt will result in an error."),
5174 set_target_permissions, NULL,
5175 &setlist, &showlist);
5176
5177 add_setshow_boolean_cmd ("may-write-memory", class_support,
5178 &may_write_memory_1, _("\
5179 Set permission to write into target memory."), _("\
5180 Show permission to write into target memory."), _("\
5181 When this permission is on, GDB may write into the target's memory.\n\
5182 Otherwise, any sort of write attempt will result in an error."),
5183 set_write_memory_permission, NULL,
5184 &setlist, &showlist);
5185
5186 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
5187 &may_insert_breakpoints_1, _("\
5188 Set permission to insert breakpoints in the target."), _("\
5189 Show permission to insert breakpoints in the target."), _("\
5190 When this permission is on, GDB may insert breakpoints in the program.\n\
5191 Otherwise, any sort of insertion attempt will result in an error."),
5192 set_target_permissions, NULL,
5193 &setlist, &showlist);
5194
5195 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
5196 &may_insert_tracepoints_1, _("\
5197 Set permission to insert tracepoints in the target."), _("\
5198 Show permission to insert tracepoints in the target."), _("\
5199 When this permission is on, GDB may insert tracepoints in the program.\n\
5200 Otherwise, any sort of insertion attempt will result in an error."),
5201 set_target_permissions, NULL,
5202 &setlist, &showlist);
5203
5204 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
5205 &may_insert_fast_tracepoints_1, _("\
5206 Set permission to insert fast tracepoints in the target."), _("\
5207 Show permission to insert fast tracepoints in the target."), _("\
5208 When this permission is on, GDB may insert fast tracepoints.\n\
5209 Otherwise, any sort of insertion attempt will result in an error."),
5210 set_target_permissions, NULL,
5211 &setlist, &showlist);
5212
5213 add_setshow_boolean_cmd ("may-interrupt", class_support,
5214 &may_stop_1, _("\
5215 Set permission to interrupt or signal the target."), _("\
5216 Show permission to interrupt or signal the target."), _("\
5217 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5218 Otherwise, any attempt to interrupt or stop will be ignored."),
5219 set_target_permissions, NULL,
5220 &setlist, &showlist);
5221
5222
5223 target_dcache = dcache_init ();
5224 }