convert to_get_tracepoint_status
[binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
59
60 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
61 long lwp, long tid);
62
63 static void tcomplain (void) ATTRIBUTE_NORETURN;
64
65 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
66
67 static int return_zero (void);
68
69 static int return_minus_one (void);
70
71 static void *return_null (void);
72
73 void target_ignore (void);
74
75 static void target_command (char *, int);
76
77 static struct target_ops *find_default_run_target (char *);
78
79 static target_xfer_partial_ftype default_xfer_partial;
80
81 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
82 ptid_t ptid);
83
84 static int dummy_find_memory_regions (struct target_ops *self,
85 find_memory_region_ftype ignore1,
86 void *ignore2);
87
88 static char *dummy_make_corefile_notes (struct target_ops *self,
89 bfd *ignore1, int *ignore2);
90
91 static int find_default_can_async_p (struct target_ops *ignore);
92
93 static int find_default_is_async_p (struct target_ops *ignore);
94
95 static enum exec_direction_kind default_execution_direction
96 (struct target_ops *self);
97
98 #include "target-delegates.c"
99
100 static void init_dummy_target (void);
101
102 static struct target_ops debug_target;
103
104 static void debug_to_open (char *, int);
105
106 static void debug_to_prepare_to_store (struct target_ops *self,
107 struct regcache *);
108
109 static void debug_to_files_info (struct target_ops *);
110
111 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
112 struct bp_target_info *);
113
114 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
118 int, int, int);
119
120 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
121 struct gdbarch *,
122 struct bp_target_info *);
123
124 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
125 struct gdbarch *,
126 struct bp_target_info *);
127
128 static int debug_to_insert_watchpoint (struct target_ops *self,
129 CORE_ADDR, int, int,
130 struct expression *);
131
132 static int debug_to_remove_watchpoint (struct target_ops *self,
133 CORE_ADDR, int, int,
134 struct expression *);
135
136 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
137
138 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
139 CORE_ADDR, CORE_ADDR, int);
140
141 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
142 CORE_ADDR, int);
143
144 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
145 CORE_ADDR, int, int,
146 struct expression *);
147
148 static void debug_to_terminal_init (struct target_ops *self);
149
150 static void debug_to_terminal_inferior (struct target_ops *self);
151
152 static void debug_to_terminal_ours_for_output (struct target_ops *self);
153
154 static void debug_to_terminal_save_ours (struct target_ops *self);
155
156 static void debug_to_terminal_ours (struct target_ops *self);
157
158 static void debug_to_load (struct target_ops *self, char *, int);
159
160 static int debug_to_can_run (struct target_ops *self);
161
162 static void debug_to_stop (struct target_ops *self, ptid_t);
163
/* Pointer to array of target architecture structures; the size of the
   array; the current index into the array; the allocated size of the
   array.  */
struct target_ops **target_structs;
unsigned target_struct_size;
unsigned target_struct_allocsize;
/* Initial capacity of TARGET_STRUCTS; doubled on demand.  */
#define DEFAULT_ALLOCSIZE 10

/* The initial current target, so that there is always a semi-valid
   current target.  */

static struct target_ops dummy_target;

/* Top of target stack.  */

static struct target_ops *target_stack;

/* The target structure we are currently using to talk to a process
   or file or whatever "inferior" we have.  It is a "squashed" copy of
   the target stack built by update_current_target.  */

struct target_ops current_target;

/* Command list for target.  */

static struct cmd_list_element *targetlist = NULL;

/* Nonzero if we should trust readonly sections from the
   executable when reading memory.  */

static int trust_readonly = 0;

/* Nonzero if we should show true memory content including
   memory breakpoint inserted by gdb.  */

static int show_memory_breakpoints = 0;

/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertent disruption, such as in non-stop mode.  */

int may_write_registers = 1;

int may_write_memory = 1;

int may_insert_breakpoints = 1;

int may_insert_tracepoints = 1;

int may_insert_fast_tracepoints = 1;

int may_stop = 1;

/* Non-zero if we want to see trace of target level stuff.  */

static unsigned int targetdebug = 0;
/* Implement "show debug target".  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *cmd, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
225
226 static void setup_target_debug (void);
227
228 /* The user just typed 'target' without the name of a target. */
229
230 static void
231 target_command (char *arg, int from_tty)
232 {
233 fputs_filtered ("Argument required (target name). Try `help target'\n",
234 gdb_stdout);
235 }
236
237 /* Default target_has_* methods for process_stratum targets. */
238
239 int
240 default_child_has_all_memory (struct target_ops *ops)
241 {
242 /* If no inferior selected, then we can't read memory here. */
243 if (ptid_equal (inferior_ptid, null_ptid))
244 return 0;
245
246 return 1;
247 }
248
249 int
250 default_child_has_memory (struct target_ops *ops)
251 {
252 /* If no inferior selected, then we can't read memory here. */
253 if (ptid_equal (inferior_ptid, null_ptid))
254 return 0;
255
256 return 1;
257 }
258
259 int
260 default_child_has_stack (struct target_ops *ops)
261 {
262 /* If no inferior selected, there's no stack. */
263 if (ptid_equal (inferior_ptid, null_ptid))
264 return 0;
265
266 return 1;
267 }
268
269 int
270 default_child_has_registers (struct target_ops *ops)
271 {
272 /* Can't read registers from no inferior. */
273 if (ptid_equal (inferior_ptid, null_ptid))
274 return 0;
275
276 return 1;
277 }
278
279 int
280 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
281 {
282 /* If there's no thread selected, then we can't make it run through
283 hoops. */
284 if (ptid_equal (the_ptid, null_ptid))
285 return 0;
286
287 return 1;
288 }
289
290
291 int
292 target_has_all_memory_1 (void)
293 {
294 struct target_ops *t;
295
296 for (t = current_target.beneath; t != NULL; t = t->beneath)
297 if (t->to_has_all_memory (t))
298 return 1;
299
300 return 0;
301 }
302
303 int
304 target_has_memory_1 (void)
305 {
306 struct target_ops *t;
307
308 for (t = current_target.beneath; t != NULL; t = t->beneath)
309 if (t->to_has_memory (t))
310 return 1;
311
312 return 0;
313 }
314
315 int
316 target_has_stack_1 (void)
317 {
318 struct target_ops *t;
319
320 for (t = current_target.beneath; t != NULL; t = t->beneath)
321 if (t->to_has_stack (t))
322 return 1;
323
324 return 0;
325 }
326
327 int
328 target_has_registers_1 (void)
329 {
330 struct target_ops *t;
331
332 for (t = current_target.beneath; t != NULL; t = t->beneath)
333 if (t->to_has_registers (t))
334 return 1;
335
336 return 0;
337 }
338
339 int
340 target_has_execution_1 (ptid_t the_ptid)
341 {
342 struct target_ops *t;
343
344 for (t = current_target.beneath; t != NULL; t = t->beneath)
345 if (t->to_has_execution (t, the_ptid))
346 return 1;
347
348 return 0;
349 }
350
351 int
352 target_has_execution_current (void)
353 {
354 return target_has_execution_1 (inferior_ptid);
355 }
356
/* Complete initialization of T.  This ensures that various fields in
   T are set, if needed by the target implementation.  Called before T
   is registered or pushed, so later code may assume these "must have"
   methods are non-NULL.  */

void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* NOTE(review): these casts reuse return_zero (declared as
     int (*) (void)) under a different signature; calling through the
     cast pointer relies on the platform calling convention ignoring
     the extra arguments.  Long-standing pattern in this file.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill in the remaining methods with delegating stubs.  */
  install_delegators (t);
}
384
385 /* Add possible target architecture T to the list and add a new
386 command 'target T->to_shortname'. Set COMPLETER as the command's
387 completer if not NULL. */
388
389 void
390 add_target_with_completer (struct target_ops *t,
391 completer_ftype *completer)
392 {
393 struct cmd_list_element *c;
394
395 complete_target_initialization (t);
396
397 if (!target_structs)
398 {
399 target_struct_allocsize = DEFAULT_ALLOCSIZE;
400 target_structs = (struct target_ops **) xmalloc
401 (target_struct_allocsize * sizeof (*target_structs));
402 }
403 if (target_struct_size >= target_struct_allocsize)
404 {
405 target_struct_allocsize *= 2;
406 target_structs = (struct target_ops **)
407 xrealloc ((char *) target_structs,
408 target_struct_allocsize * sizeof (*target_structs));
409 }
410 target_structs[target_struct_size++] = t;
411
412 if (targetlist == NULL)
413 add_prefix_cmd ("target", class_run, target_command, _("\
414 Connect to a target machine or process.\n\
415 The first argument is the type or protocol of the target machine.\n\
416 Remaining arguments are interpreted by the target protocol. For more\n\
417 information on the arguments for a particular protocol, type\n\
418 `help target ' followed by the protocol name."),
419 &targetlist, "target ", 0, &cmdlist);
420 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
421 &targetlist);
422 if (completer != NULL)
423 set_cmd_completer (c, completer);
424 }
425
426 /* Add a possible target architecture to the list. */
427
428 void
429 add_target (struct target_ops *t)
430 {
431 add_target_with_completer (t, NULL);
432 }
433
434 /* See target.h. */
435
436 void
437 add_deprecated_target_alias (struct target_ops *t, char *alias)
438 {
439 struct cmd_list_element *c;
440 char *alt;
441
442 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
443 see PR cli/15104. */
444 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
445 alt = xstrprintf ("target %s", t->to_shortname);
446 deprecate_cmd (c, alt);
447 }
448
/* Stub functions */

/* Do-nothing default used wherever a target method is optional.  */

void
target_ignore (void)
{
}
455
456 void
457 target_kill (void)
458 {
459 struct target_ops *t;
460
461 for (t = current_target.beneath; t != NULL; t = t->beneath)
462 if (t->to_kill != NULL)
463 {
464 if (targetdebug)
465 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
466
467 t->to_kill (t);
468 return;
469 }
470
471 noprocess ();
472 }
473
474 void
475 target_load (char *arg, int from_tty)
476 {
477 target_dcache_invalidate ();
478 (*current_target.to_load) (&current_target, arg, from_tty);
479 }
480
481 void
482 target_create_inferior (char *exec_file, char *args,
483 char **env, int from_tty)
484 {
485 struct target_ops *t;
486
487 for (t = current_target.beneath; t != NULL; t = t->beneath)
488 {
489 if (t->to_create_inferior != NULL)
490 {
491 t->to_create_inferior (t, exec_file, args, env, from_tty);
492 if (targetdebug)
493 fprintf_unfiltered (gdb_stdlog,
494 "target_create_inferior (%s, %s, xxx, %d)\n",
495 exec_file, args, from_tty);
496 return;
497 }
498 }
499
500 internal_error (__FILE__, __LINE__,
501 _("could not find a target to create inferior"));
502 }
503
504 void
505 target_terminal_inferior (void)
506 {
507 /* A background resume (``run&'') should leave GDB in control of the
508 terminal. Use target_can_async_p, not target_is_async_p, since at
509 this point the target is not async yet. However, if sync_execution
510 is not set, we know it will become async prior to resume. */
511 if (target_can_async_p () && !sync_execution)
512 return;
513
514 /* If GDB is resuming the inferior in the foreground, install
515 inferior's terminal modes. */
516 (*current_target.to_terminal_inferior) (&current_target);
517 }
518
519 static int
520 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
521 struct target_ops *t)
522 {
523 errno = EIO; /* Can't read/write this location. */
524 return 0; /* No bytes handled. */
525 }
526
527 static void
528 tcomplain (void)
529 {
530 error (_("You can't do that when your target is `%s'"),
531 current_target.to_shortname);
532 }
533
/* Throw an error saying a live process is required.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
539
/* Default to_terminal_info: nothing was ever saved.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
545
546 /* A default implementation for the to_get_ada_task_ptid target method.
547
548 This function builds the PTID by using both LWP and TID as part of
549 the PTID lwp and tid elements. The pid used is the pid of the
550 inferior_ptid. */
551
552 static ptid_t
553 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
554 {
555 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
556 }
557
558 static enum exec_direction_kind
559 default_execution_direction (struct target_ops *self)
560 {
561 if (!target_can_execute_reverse)
562 return EXEC_FORWARD;
563 else if (!target_can_async_p ())
564 return EXEC_FORWARD;
565 else
566 gdb_assert_not_reached ("\
567 to_execution_direction must be implemented for reverse async");
568 }
569
/* Go through the target stack from top to bottom, copying over zero
   entries in current_target, then filling in still empty entries.  In
   effect, we are doing class inheritance through the pushed target
   vectors.

   NOTE: cagney/2003-10-17: The problem with this inheritance, as it
   is currently implemented, is that it discards any knowledge of
   which target an inherited method originally belonged to.
   Consequently, new target methods should instead explicitly and
   locally search the target stack for the target that can handle the
   request.  */

static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy FIELD from TARGET into current_target unless a shallower
     stratum already supplied it — top of the stack wins.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      /* Do not inherit to_files_info.  */
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      /* Do not inherit to_can_use_hw_breakpoint.  */
      /* Do not inherit to_insert_hw_breakpoint.  */
      /* Do not inherit to_remove_hw_breakpoint.  */
      /* Do not inherit to_ranged_break_num_registers.  */
      /* Do not inherit to_insert_watchpoint.  */
      /* Do not inherit to_remove_watchpoint.  */
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      /* Do not inherit to_watchpoint_addr_within_range.  */
      /* Do not inherit to_region_ok_for_hw_watchpoint.  */
      /* Do not inherit to_can_accel_watchpoint_condition.  */
      /* Do not inherit to_masked_watch_num_registers.  */
      /* Do not inherit to_terminal_init.  */
      /* Do not inherit to_terminal_inferior.  */
      /* Do not inherit to_terminal_ours_for_output.  */
      /* Do not inherit to_terminal_ours.  */
      /* Do not inherit to_terminal_save_ours.  */
      /* Do not inherit to_terminal_info.  */
      /* Do not inherit to_kill.  */
      /* Do not inherit to_load.  */
      /* Do no inherit to_create_inferior.  */
      /* Do not inherit to_post_startup_inferior.  */
      /* Do not inherit to_insert_fork_catchpoint.  */
      /* Do not inherit to_remove_fork_catchpoint.  */
      /* Do not inherit to_insert_vfork_catchpoint.  */
      /* Do not inherit to_remove_vfork_catchpoint.  */
      /* Do not inherit to_follow_fork.  */
      /* Do not inherit to_insert_exec_catchpoint.  */
      /* Do not inherit to_remove_exec_catchpoint.  */
      /* Do not inherit to_set_syscall_catchpoint.  */
      /* Do not inherit to_has_exited.  */
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      /* Do not inherit to_extra_thread_info.  */
      /* Do not inherit to_thread_name.  */
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      /* Do not inherit to_pid_to_exec_file.  */
      /* Do not inherit to_log_command.  */
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      /* Do not inherit to_find_memory_regions.  */
      /* Do not inherit to_make_corefile_notes.  */
      /* Do not inherit to_get_bookmark.  */
      /* Do not inherit to_goto_bookmark.  */
      /* Do not inherit to_get_thread_local_address.  */
      /* Do not inherit to_can_execute_reverse.  */
      /* Do not inherit to_execution_direction.  */
      /* Do not inherit to_thread_architecture.  */
      /* Do not inherit to_read_description.  */
      /* Do not inherit to_get_ada_task_ptid.  */
      /* Do not inherit to_search_memory.  */
      /* Do not inherit to_supports_multi_process.  */
      /* Do not inherit to_supports_enable_disable_tracepoint.  */
      /* Do not inherit to_supports_string_tracing.  */
      /* Do not inherit to_trace_init.  */
      /* Do not inherit to_download_tracepoint.  */
      /* Do not inherit to_can_download_tracepoint.  */
      /* Do not inherit to_download_trace_state_variable.  */
      /* Do not inherit to_enable_tracepoint.  */
      /* Do not inherit to_disable_tracepoint.  */
      /* Do not inherit to_trace_set_readonly_regions.  */
      /* Do not inherit to_trace_start.  */
      /* Do not inherit to_get_trace_status.  */
      /* Do not inherit to_get_tracepoint_status.  */
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_buffer_size, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that prints an error,
     others are hard-wired to a standard recursive default.

     NOTE(review): the casts below change the stubs' signatures; calls
     through the cast pointers rely on the ABI tolerating the argument
     mismatch.  Long-standing pattern in this file.  */

#define de_fault(field, value) \
  if (!current_target.field) \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_stop,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  current_target.to_read_description = NULL;
  de_fault (to_trace_stop,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (struct target_ops *,
		      enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (struct target_ops *, int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (struct target_ops *, const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct target_ops *, struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct target_ops *, struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (struct target_ops *, gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (struct target_ops *))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_trace_buffer_size,
	    (void (*) (struct target_ops *, LONGEST))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (struct target_ops *,
		      const char *, const char *, const char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (struct target_ops *, ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (struct target_ops *,
		      CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (struct target_ops *,
						    const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (struct target_ops *))
	    return_null);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (struct target_ops *, int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (struct target_ops *))
	    return_zero);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
836
837 /* Push a new target type into the stack of the existing target accessors,
838 possibly superseding some of the existing accessors.
839
840 Rather than allow an empty stack, we always have the dummy target at
841 the bottom stratum, so we can call the function vectors without
842 checking them. */
843
844 void
845 push_target (struct target_ops *t)
846 {
847 struct target_ops **cur;
848
849 /* Check magic number. If wrong, it probably means someone changed
850 the struct definition, but not all the places that initialize one. */
851 if (t->to_magic != OPS_MAGIC)
852 {
853 fprintf_unfiltered (gdb_stderr,
854 "Magic number of %s target struct wrong\n",
855 t->to_shortname);
856 internal_error (__FILE__, __LINE__,
857 _("failed internal consistency check"));
858 }
859
860 /* Find the proper stratum to install this target in. */
861 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
862 {
863 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
864 break;
865 }
866
867 /* If there's already targets at this stratum, remove them. */
868 /* FIXME: cagney/2003-10-15: I think this should be popping all
869 targets to CUR, and not just those at this stratum level. */
870 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
871 {
872 /* There's already something at this stratum level. Close it,
873 and un-hook it from the stack. */
874 struct target_ops *tmp = (*cur);
875
876 (*cur) = (*cur)->beneath;
877 tmp->beneath = NULL;
878 target_close (tmp);
879 }
880
881 /* We have removed all targets in our stratum, now add the new one. */
882 t->beneath = (*cur);
883 (*cur) = t;
884
885 update_current_target ();
886 }
887
888 /* Remove a target_ops vector from the stack, wherever it may be.
889 Return how many times it was removed (0 or 1). */
890
891 int
892 unpush_target (struct target_ops *t)
893 {
894 struct target_ops **cur;
895 struct target_ops *tmp;
896
897 if (t->to_stratum == dummy_stratum)
898 internal_error (__FILE__, __LINE__,
899 _("Attempt to unpush the dummy target"));
900
901 /* Look for the specified target. Note that we assume that a target
902 can only occur once in the target stack. */
903
904 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
905 {
906 if ((*cur) == t)
907 break;
908 }
909
910 /* If we don't find target_ops, quit. Only open targets should be
911 closed. */
912 if ((*cur) == NULL)
913 return 0;
914
915 /* Unchain the target. */
916 tmp = (*cur);
917 (*cur) = (*cur)->beneath;
918 tmp->beneath = NULL;
919
920 update_current_target ();
921
922 /* Finally close the target. Note we do this after unchaining, so
923 any target method calls from within the target_close
924 implementation don't end up in T anymore. */
925 target_close (t);
926
927 return 1;
928 }
929
930 void
931 pop_all_targets_above (enum strata above_stratum)
932 {
933 while ((int) (current_target.to_stratum) > (int) above_stratum)
934 {
935 if (!unpush_target (target_stack))
936 {
937 fprintf_unfiltered (gdb_stderr,
938 "pop_all_targets couldn't find target %s\n",
939 target_stack->to_shortname);
940 internal_error (__FILE__, __LINE__,
941 _("failed internal consistency check"));
942 break;
943 }
944 }
945 }
946
947 void
948 pop_all_targets (void)
949 {
950 pop_all_targets_above (dummy_stratum);
951 }
952
953 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
954
955 int
956 target_is_pushed (struct target_ops *t)
957 {
958 struct target_ops **cur;
959
960 /* Check magic number. If wrong, it probably means someone changed
961 the struct definition, but not all the places that initialize one. */
962 if (t->to_magic != OPS_MAGIC)
963 {
964 fprintf_unfiltered (gdb_stderr,
965 "Magic number of %s target struct wrong\n",
966 t->to_shortname);
967 internal_error (__FILE__, __LINE__,
968 _("failed internal consistency check"));
969 }
970
971 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
972 if (*cur == t)
973 return 1;
974
975 return 0;
976 }
977
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  Throws
   a (user-visible) error when TLS cannot be resolved; on success
   returns the resolved address.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target on the stack that can resolve TLS
     addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  /* Translate each known TLS error into a user-facing message;
	     anything else is re-thrown unchanged.  */
	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1074
1075 const char *
1076 target_xfer_status_to_string (enum target_xfer_status err)
1077 {
1078 #define CASE(X) case X: return #X
1079 switch (err)
1080 {
1081 CASE(TARGET_XFER_E_IO);
1082 CASE(TARGET_XFER_E_UNAVAILABLE);
1083 default:
1084 return "<unknown>";
1085 }
1086 #undef CASE
1087 };
1088
1089
#undef MIN
/* Minimum of two values.  Note: evaluates both arguments more than
   once, so A and B must be free of side effects.  */
#define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1092
/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];		/* One aligned 4-byte chunk of target memory.  */
  int errcode = 0;
  char *buffer;			/* Growable result buffer.  */
  int buffer_allocated;
  char *bufptr;			/* Next free byte in BUFFER.  */
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read a 4-byte-aligned chunk; TLEN is how many of its bytes
	 belong to the string, OFFSET where they start in BUF.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the result buffer by doubling when it would overflow.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes out, stopping (and counting the terminator) if we
	 hit the NUL.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* BUFFER is returned even on error; caller frees it.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1165
1166 struct target_section_table *
1167 target_get_section_table (struct target_ops *target)
1168 {
1169 struct target_ops *t;
1170
1171 if (targetdebug)
1172 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1173
1174 for (t = target; t != NULL; t = t->beneath)
1175 if (t->to_get_section_table != NULL)
1176 return (*t->to_get_section_table) (t);
1177
1178 return NULL;
1179 }
1180
1181 /* Find a section containing ADDR. */
1182
1183 struct target_section *
1184 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1185 {
1186 struct target_section_table *table = target_get_section_table (target);
1187 struct target_section *secp;
1188
1189 if (table == NULL)
1190 return NULL;
1191
1192 for (secp = table->sections; secp < table->sections_end; secp++)
1193 {
1194 if (addr >= secp->addr && addr < secp->endaddr)
1195 return secp;
1196 }
1197 return NULL;
1198 }
1199
1200 /* Read memory from the live target, even if currently inspecting a
1201 traceframe. The return is the same as that of target_read. */
1202
1203 static enum target_xfer_status
1204 target_read_live_memory (enum target_object object,
1205 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1206 ULONGEST *xfered_len)
1207 {
1208 enum target_xfer_status ret;
1209 struct cleanup *cleanup;
1210
1211 /* Switch momentarily out of tfind mode so to access live memory.
1212 Note that this must not clear global state, such as the frame
1213 cache, which must still remain valid for the previous traceframe.
1214 We may be _building_ the frame cache at this point. */
1215 cleanup = make_cleanup_restore_traceframe_number ();
1216 set_traceframe_number (-1);
1217
1218 ret = target_xfer_partial (current_target.beneath, object, NULL,
1219 myaddr, NULL, memaddr, len, xfered_len);
1220
1221 do_cleanups (cleanup);
1222 return ret;
1223 }
1224
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed when MEMADDR falls inside a read-only section.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      /* Scan all sections, clamping the request so it never crosses
	 out of a single section's extent.  */
      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  /* MEMADDR was not in a read-only section; report end-of-data.  */
  return TARGET_XFER_EOF;
}
1280
1281 /* Read memory from more than one valid target. A core file, for
1282 instance, could have some of memory but delegate other bits to
1283 the target below it. So, we must manually try all targets. */
1284
1285 static enum target_xfer_status
1286 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1287 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1288 ULONGEST *xfered_len)
1289 {
1290 enum target_xfer_status res;
1291
1292 do
1293 {
1294 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1295 readbuf, writebuf, memaddr, len,
1296 xfered_len);
1297 if (res == TARGET_XFER_OK)
1298 break;
1299
1300 /* Stop if the target reports that the memory is not available. */
1301 if (res == TARGET_XFER_E_UNAVAILABLE)
1302 break;
1303
1304 /* We want to continue past core files to executables, but not
1305 past a running target's memory. */
1306 if (ops->to_has_all_memory (ops))
1307 break;
1308
1309 ops = ops->beneath;
1310 }
1311 while (ops != NULL);
1312
1313 return res;
1314 }
1315
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   The access is tried against several sources in order: unmapped
   overlay sections, trusted read-only executable sections, traceframe
   data (with live read-only fallback), the dcache, and finally the
   raw target stack.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Clamp the read so it ends where the traceframe's
		     collected memory begins.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  First clamp the length so the
     access does not extend past the end of MEMADDR's memory region.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  /* Route the access through the dcache when the region or the
     stack/code cache settings ask for it.  */
  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1526
1527 /* Perform a partial memory transfer. For docs see target.h,
1528 to_xfer_partial. */
1529
1530 static enum target_xfer_status
1531 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1532 gdb_byte *readbuf, const gdb_byte *writebuf,
1533 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1534 {
1535 enum target_xfer_status res;
1536
1537 /* Zero length requests are ok and require no work. */
1538 if (len == 0)
1539 return TARGET_XFER_EOF;
1540
1541 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1542 breakpoint insns, thus hiding out from higher layers whether
1543 there are software breakpoints inserted in the code stream. */
1544 if (readbuf != NULL)
1545 {
1546 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1547 xfered_len);
1548
1549 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1550 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1551 }
1552 else
1553 {
1554 void *buf;
1555 struct cleanup *old_chain;
1556
1557 /* A large write request is likely to be partially satisfied
1558 by memory_xfer_partial_1. We will continually malloc
1559 and free a copy of the entire write request for breakpoint
1560 shadow handling even though we only end up writing a small
1561 subset of it. Cap writes to 4KB to mitigate this. */
1562 len = min (4096, len);
1563
1564 buf = xmalloc (len);
1565 old_chain = make_cleanup (xfree, buf);
1566 memcpy (buf, writebuf, len);
1567
1568 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1569 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1570 xfered_len);
1571
1572 do_cleanups (old_chain);
1573 }
1574
1575 return res;
1576 }
1577
1578 static void
1579 restore_show_memory_breakpoints (void *arg)
1580 {
1581 show_memory_breakpoints = (uintptr_t) arg;
1582 }
1583
1584 struct cleanup *
1585 make_show_memory_breakpoints_cleanup (int show)
1586 {
1587 int current = show_memory_breakpoints;
1588
1589 show_memory_breakpoints = show;
1590 return make_cleanup (restore_show_memory_breakpoints,
1591 (void *) (uintptr_t) current);
1592 }
1593
/* For docs see target.h, to_xfer_partial.  Central dispatch for all
   partial transfers; also emits "set debug target" tracing and
   sanity-checks *XFERED_LEN on success.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the "set may-write-memory off" setting.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      /* On success, hex-dump the transferred bytes, 16 per line;
	 unless "set debug target" is >= 2, stop after one line.  */
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1686
1687 /* Read LEN bytes of target memory at address MEMADDR, placing the
1688 results in GDB's memory at MYADDR. Returns either 0 for success or
1689 TARGET_XFER_E_IO if any error occurs.
1690
1691 If an error occurs, no guarantee is made about the contents of the data at
1692 MYADDR. In particular, the caller should not depend upon partial reads
1693 filling the buffer with good data. There is no way for the caller to know
1694 how much good data might have been transfered anyway. Callers that can
1695 deal with partial reads should call target_read (which will retry until
1696 it makes no progress, and then return how much was transferred). */
1697
1698 int
1699 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1700 {
1701 /* Dispatch to the topmost target, not the flattened current_target.
1702 Memory accesses check target->to_has_(all_)memory, and the
1703 flattened target doesn't inherit those. */
1704 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1705 myaddr, memaddr, len) == len)
1706 return 0;
1707 else
1708 return TARGET_XFER_E_IO;
1709 }
1710
1711 /* Like target_read_memory, but specify explicitly that this is a read
1712 from the target's raw memory. That is, this read bypasses the
1713 dcache, breakpoint shadowing, etc. */
1714
1715 int
1716 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1717 {
1718 /* See comment in target_read_memory about why the request starts at
1719 current_target.beneath. */
1720 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1721 myaddr, memaddr, len) == len)
1722 return 0;
1723 else
1724 return TARGET_XFER_E_IO;
1725 }
1726
1727 /* Like target_read_memory, but specify explicitly that this is a read from
1728 the target's stack. This may trigger different cache behavior. */
1729
1730 int
1731 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1732 {
1733 /* See comment in target_read_memory about why the request starts at
1734 current_target.beneath. */
1735 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1736 myaddr, memaddr, len) == len)
1737 return 0;
1738 else
1739 return TARGET_XFER_E_IO;
1740 }
1741
1742 /* Like target_read_memory, but specify explicitly that this is a read from
1743 the target's code. This may trigger different cache behavior. */
1744
1745 int
1746 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1747 {
1748 /* See comment in target_read_memory about why the request starts at
1749 current_target.beneath. */
1750 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1751 myaddr, memaddr, len) == len)
1752 return 0;
1753 else
1754 return TARGET_XFER_E_IO;
1755 }
1756
1757 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1758 Returns either 0 for success or TARGET_XFER_E_IO if any
1759 error occurs. If an error occurs, no guarantee is made about how
1760 much data got written. Callers that can deal with partial writes
1761 should call target_write. */
1762
1763 int
1764 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1765 {
1766 /* See comment in target_read_memory about why the request starts at
1767 current_target.beneath. */
1768 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1769 myaddr, memaddr, len) == len)
1770 return 0;
1771 else
1772 return TARGET_XFER_E_IO;
1773 }
1774
1775 /* Write LEN bytes from MYADDR to target raw memory at address
1776 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1777 if any error occurs. If an error occurs, no guarantee is made
1778 about how much data got written. Callers that can deal with
1779 partial writes should call target_write. */
1780
1781 int
1782 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1783 {
1784 /* See comment in target_read_memory about why the request starts at
1785 current_target.beneath. */
1786 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1787 myaddr, memaddr, len) == len)
1788 return 0;
1789 else
1790 return TARGET_XFER_E_IO;
1791 }
1792
1793 /* Fetch the target's memory map. */
1794
1795 VEC(mem_region_s) *
1796 target_memory_map (void)
1797 {
1798 VEC(mem_region_s) *result;
1799 struct mem_region *last_one, *this_one;
1800 int ix;
1801 struct target_ops *t;
1802
1803 if (targetdebug)
1804 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1805
1806 for (t = current_target.beneath; t != NULL; t = t->beneath)
1807 if (t->to_memory_map != NULL)
1808 break;
1809
1810 if (t == NULL)
1811 return NULL;
1812
1813 result = t->to_memory_map (t);
1814 if (result == NULL)
1815 return NULL;
1816
1817 qsort (VEC_address (mem_region_s, result),
1818 VEC_length (mem_region_s, result),
1819 sizeof (struct mem_region), mem_region_cmp);
1820
1821 /* Check that regions do not overlap. Simultaneously assign
1822 a numbering for the "mem" commands to use to refer to
1823 each region. */
1824 last_one = NULL;
1825 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1826 {
1827 this_one->number = ix;
1828
1829 if (last_one && last_one->hi > this_one->lo)
1830 {
1831 warning (_("Overlapping regions in memory map: ignoring"));
1832 VEC_free (mem_region_s, result);
1833 return NULL;
1834 }
1835 last_one = this_one;
1836 }
1837
1838 return result;
1839 }
1840
1841 void
1842 target_flash_erase (ULONGEST address, LONGEST length)
1843 {
1844 struct target_ops *t;
1845
1846 for (t = current_target.beneath; t != NULL; t = t->beneath)
1847 if (t->to_flash_erase != NULL)
1848 {
1849 if (targetdebug)
1850 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1851 hex_string (address), phex (length, 0));
1852 t->to_flash_erase (t, address, length);
1853 return;
1854 }
1855
1856 tcomplain ();
1857 }
1858
1859 void
1860 target_flash_done (void)
1861 {
1862 struct target_ops *t;
1863
1864 for (t = current_target.beneath; t != NULL; t = t->beneath)
1865 if (t->to_flash_done != NULL)
1866 {
1867 if (targetdebug)
1868 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1869 t->to_flash_done (t);
1870 return;
1871 }
1872
1873 tcomplain ();
1874 }
1875
/* Show-hook printing the current "trust-readonly-sections" mode.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1884
1885 /* More generic transfers. */
1886
1887 static enum target_xfer_status
1888 default_xfer_partial (struct target_ops *ops, enum target_object object,
1889 const char *annex, gdb_byte *readbuf,
1890 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1891 ULONGEST *xfered_len)
1892 {
1893 if (object == TARGET_OBJECT_MEMORY
1894 && ops->deprecated_xfer_memory != NULL)
1895 /* If available, fall back to the target's
1896 "deprecated_xfer_memory" method. */
1897 {
1898 int xfered = -1;
1899
1900 errno = 0;
1901 if (writebuf != NULL)
1902 {
1903 void *buffer = xmalloc (len);
1904 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1905
1906 memcpy (buffer, writebuf, len);
1907 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1908 1/*write*/, NULL, ops);
1909 do_cleanups (cleanup);
1910 }
1911 if (readbuf != NULL)
1912 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1913 0/*read*/, NULL, ops);
1914 if (xfered > 0)
1915 {
1916 *xfered_len = (ULONGEST) xfered;
1917 return TARGET_XFER_E_IO;
1918 }
1919 else if (xfered == 0 && errno == 0)
1920 /* "deprecated_xfer_memory" uses 0, cross checked against
1921 ERRNO as one indication of an error. */
1922 return TARGET_XFER_EOF;
1923 else
1924 return TARGET_XFER_E_IO;
1925 }
1926 else
1927 {
1928 gdb_assert (ops->beneath != NULL);
1929 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1930 readbuf, writebuf, offset, len,
1931 xfered_len);
1932 }
1933 }
1934
/* Target vector read/write partial wrapper functions.  */

/* Convenience wrapper: a partial read is a partial transfer with no
   write buffer.  */

static enum target_xfer_status
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
			      xfered_len);
}
1947
/* Convenience wrapper: a partial write is a partial transfer with no
   read buffer.  */

static enum target_xfer_status
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
			      xfered_len);
}
1957
1958 /* Wrappers to perform the full transfer. */
1959
1960 /* For docs on target_read see target.h. */
1961
1962 LONGEST
1963 target_read (struct target_ops *ops,
1964 enum target_object object,
1965 const char *annex, gdb_byte *buf,
1966 ULONGEST offset, LONGEST len)
1967 {
1968 LONGEST xfered = 0;
1969
1970 while (xfered < len)
1971 {
1972 ULONGEST xfered_len;
1973 enum target_xfer_status status;
1974
1975 status = target_read_partial (ops, object, annex,
1976 (gdb_byte *) buf + xfered,
1977 offset + xfered, len - xfered,
1978 &xfered_len);
1979
1980 /* Call an observer, notifying them of the xfer progress? */
1981 if (status == TARGET_XFER_EOF)
1982 return xfered;
1983 else if (status == TARGET_XFER_OK)
1984 {
1985 xfered += xfered_len;
1986 QUIT;
1987 }
1988 else
1989 return -1;
1990
1991 }
1992 return len;
1993 }
1994
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning, or end, nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and it seems unacceptable solution.  Explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;			/* Nonzero if the readable part starts at BEGIN.  */
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.

     This is a binary search for the boundary between the readable and
     the unreadable part of the range.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the next
	     iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  BUF is handed
	 off to the result; do not free it here.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy just that
	 tail into a fresh buffer for the result.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2129
2130 void
2131 free_memory_read_result_vector (void *x)
2132 {
2133 VEC(memory_read_result_s) *v = x;
2134 memory_read_result_s *current;
2135 int ix;
2136
2137 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2138 {
2139 xfree (current->data);
2140 }
2141 VEC_free (memory_read_result_s, v);
2142 }
2143
2144 VEC(memory_read_result_s) *
2145 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2146 {
2147 VEC(memory_read_result_s) *result = 0;
2148
2149 LONGEST xfered = 0;
2150 while (xfered < len)
2151 {
2152 struct mem_region *region = lookup_mem_region (offset + xfered);
2153 LONGEST rlen;
2154
2155 /* If there is no explicit region, a fake one should be created. */
2156 gdb_assert (region);
2157
2158 if (region->hi == 0)
2159 rlen = len - xfered;
2160 else
2161 rlen = region->hi - offset;
2162
2163 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2164 {
2165 /* Cannot read this region. Note that we can end up here only
2166 if the region is explicitly marked inaccessible, or
2167 'inaccessible-by-default' is in effect. */
2168 xfered += rlen;
2169 }
2170 else
2171 {
2172 LONGEST to_read = min (len - xfered, rlen);
2173 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2174
2175 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2176 (gdb_byte *) buffer,
2177 offset + xfered, to_read);
2178 /* Call an observer, notifying them of the xfer progress? */
2179 if (xfer <= 0)
2180 {
2181 /* Got an error reading full chunk. See if maybe we can read
2182 some subrange. */
2183 xfree (buffer);
2184 read_whatever_is_readable (ops, offset + xfered,
2185 offset + xfered + to_read, &result);
2186 xfered += to_read;
2187 }
2188 else
2189 {
2190 struct memory_read_result r;
2191 r.data = buffer;
2192 r.begin = offset + xfered;
2193 r.end = r.begin + xfer;
2194 VEC_safe_push (memory_read_result_s, result, &r);
2195 xfered += xfer;
2196 }
2197 QUIT;
2198 }
2199 }
2200 return result;
2201 }
2202
2203
2204 /* An alternative to target_write with progress callbacks. */
2205
2206 LONGEST
2207 target_write_with_progress (struct target_ops *ops,
2208 enum target_object object,
2209 const char *annex, const gdb_byte *buf,
2210 ULONGEST offset, LONGEST len,
2211 void (*progress) (ULONGEST, void *), void *baton)
2212 {
2213 LONGEST xfered = 0;
2214
2215 /* Give the progress callback a chance to set up. */
2216 if (progress)
2217 (*progress) (0, baton);
2218
2219 while (xfered < len)
2220 {
2221 ULONGEST xfered_len;
2222 enum target_xfer_status status;
2223
2224 status = target_write_partial (ops, object, annex,
2225 (gdb_byte *) buf + xfered,
2226 offset + xfered, len - xfered,
2227 &xfered_len);
2228
2229 if (status == TARGET_XFER_EOF)
2230 return xfered;
2231 if (TARGET_XFER_STATUS_ERROR_P (status))
2232 return -1;
2233
2234 gdb_assert (status == TARGET_XFER_OK);
2235 if (progress)
2236 (*progress) (xfered_len, baton);
2237
2238 xfered += xfered_len;
2239 QUIT;
2240 }
2241 return len;
2242 }
2243
2244 /* For docs on target_write see target.h. */
2245
2246 LONGEST
2247 target_write (struct target_ops *ops,
2248 enum target_object object,
2249 const char *annex, const gdb_byte *buf,
2250 ULONGEST offset, LONGEST len)
2251 {
2252 return target_write_with_progress (ops, object, annex, buf, offset, len,
2253 NULL, NULL);
2254 }
2255
2256 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2257 the size of the transferred data. PADDING additional bytes are
2258 available in *BUF_P. This is a helper function for
2259 target_read_alloc; see the declaration of that function for more
2260 information. */
2261
static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      /* Request at most the free space minus PADDING, so PADDING
	 extra bytes are always available past the data in BUF (the
	 caller may use them, e.g. for a NUL terminator).  */
      status = target_read_partial (ops, object, annex, &buf[buf_pos],
				    buf_pos, buf_alloc - buf_pos - padding,
				    &xfered_len);

      if (status == TARGET_XFER_EOF)
	{
	  /* Read all there was.  Note that when nothing at all was
	     read, BUF is freed and *BUF_P is left untouched.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}
      else if (status != TARGET_XFER_OK)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return TARGET_XFER_E_IO;
	}

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  Doubling keeps the
	 number of reallocations logarithmic in the object size.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2318
2319 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2320 the size of the transferred data. See the declaration in "target.h"
2321 function for more information about the return value. */
2322
2323 LONGEST
2324 target_read_alloc (struct target_ops *ops, enum target_object object,
2325 const char *annex, gdb_byte **buf_p)
2326 {
2327 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2328 }
2329
2330 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2331 returned as a string, allocated using xmalloc. If an error occurs
2332 or the transfer is unsupported, NULL is returned. Empty objects
2333 are returned as allocated but empty strings. A warning is issued
2334 if the result contains any embedded NUL bytes. */
2335
char *
target_read_stralloc (struct target_ops *ops, enum target_object object,
		      const char *annex)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* PADDING == 1 reserves room for the NUL terminator appended
     below.  */
  transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  /* Empty object: BUFFER was never allocated by the helper, so
     return a fresh empty string instead.  */
  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target object %d, annex %s, "
		   "contained unexpected null characters"),
		 (int) object, annex ? annex : "(none)");
	break;
      }

  return bufstr;
}
2367
2368 /* Memory transfer methods. */
2369
2370 void
2371 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2372 LONGEST len)
2373 {
2374 /* This method is used to read from an alternate, non-current
2375 target. This read must bypass the overlay support (as symbols
2376 don't match this target), and GDB's internal cache (wrong cache
2377 for this target). */
2378 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2379 != len)
2380 memory_error (TARGET_XFER_E_IO, addr);
2381 }
2382
2383 ULONGEST
2384 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2385 int len, enum bfd_endian byte_order)
2386 {
2387 gdb_byte buf[sizeof (ULONGEST)];
2388
2389 gdb_assert (len <= sizeof (buf));
2390 get_target_memory (ops, addr, buf, len);
2391 return extract_unsigned_integer (buf, len, byte_order);
2392 }
2393
2394 /* See target.h. */
2395
2396 int
2397 target_insert_breakpoint (struct gdbarch *gdbarch,
2398 struct bp_target_info *bp_tgt)
2399 {
2400 if (!may_insert_breakpoints)
2401 {
2402 warning (_("May not insert breakpoints"));
2403 return 1;
2404 }
2405
2406 return current_target.to_insert_breakpoint (&current_target,
2407 gdbarch, bp_tgt);
2408 }
2409
2410 /* See target.h. */
2411
2412 int
2413 target_remove_breakpoint (struct gdbarch *gdbarch,
2414 struct bp_target_info *bp_tgt)
2415 {
2416 /* This is kind of a weird case to handle, but the permission might
2417 have been changed after breakpoints were inserted - in which case
2418 we should just take the user literally and assume that any
2419 breakpoints should be left in place. */
2420 if (!may_insert_breakpoints)
2421 {
2422 warning (_("May not remove breakpoints"));
2423 return 1;
2424 }
2425
2426 return current_target.to_remove_breakpoint (&current_target,
2427 gdbarch, bp_tgt);
2428 }
2429
2430 static void
2431 target_info (char *args, int from_tty)
2432 {
2433 struct target_ops *t;
2434 int has_all_mem = 0;
2435
2436 if (symfile_objfile != NULL)
2437 printf_unfiltered (_("Symbols from \"%s\".\n"),
2438 objfile_name (symfile_objfile));
2439
2440 for (t = target_stack; t != NULL; t = t->beneath)
2441 {
2442 if (!(*t->to_has_memory) (t))
2443 continue;
2444
2445 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2446 continue;
2447 if (has_all_mem)
2448 printf_unfiltered (_("\tWhile running this, "
2449 "GDB does not access memory from...\n"));
2450 printf_unfiltered ("%s:\n", t->to_longname);
2451 (t->to_files_info) (t);
2452 has_all_mem = (*t->to_has_all_memory) (t);
2453 }
2454 }
2455
2456 /* This function is called before any new inferior is created, e.g.
2457 by running a program, attaching, or connecting to a target.
2458 It cleans up any state from previous invocations which might
2459 change between runs. This is a subset of what target_preopen
2460 resets (things which might change between targets). */
2461
void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  In that case none of this state
     can be discarded, so skip the whole reset.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* The in-process agent may or may not exist in the new inferior;
     force its capabilities to be re-probed.  */
  agent_capability_invalidate ();
}
2496
2497 /* Callback for iterate_over_inferiors. Gets rid of the given
2498 inferior. */
2499
2500 static int
2501 dispose_inferior (struct inferior *inf, void *args)
2502 {
2503 struct thread_info *thread;
2504
2505 thread = any_thread_of_process (inf->pid);
2506 if (thread)
2507 {
2508 switch_to_thread (thread->ptid);
2509
2510 /* Core inferiors actually should be detached, not killed. */
2511 if (target_has_execution)
2512 target_kill ();
2513 else
2514 target_detach (NULL, 0);
2515 }
2516
2517 return 0;
2518 }
2519
2520 /* This is to be called by the open routine before it does
2521 anything. */
2522
2523 void
2524 target_preopen (int from_tty)
2525 {
2526 dont_repeat ();
2527
2528 if (have_inferiors ())
2529 {
2530 if (!from_tty
2531 || !have_live_inferiors ()
2532 || query (_("A program is being debugged already. Kill it? ")))
2533 iterate_over_inferiors (dispose_inferior, NULL);
2534 else
2535 error (_("Program not killed."));
2536 }
2537
2538 /* Calling target_kill may remove the target from the stack. But if
2539 it doesn't (which seems like a win for UDI), remove it now. */
2540 /* Leave the exec target, though. The user may be switching from a
2541 live process to a core of the same program. */
2542 pop_all_targets_above (file_stratum);
2543
2544 target_pre_inferior (from_tty);
2545 }
2546
2547 /* Detach a target after doing deferred register stores. */
2548
2549 void
2550 target_detach (const char *args, int from_tty)
2551 {
2552 struct target_ops* t;
2553
2554 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2555 /* Don't remove global breakpoints here. They're removed on
2556 disconnection from the target. */
2557 ;
2558 else
2559 /* If we're in breakpoints-always-inserted mode, have to remove
2560 them before detaching. */
2561 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2562
2563 prepare_for_detach ();
2564
2565 current_target.to_detach (&current_target, args, from_tty);
2566 if (targetdebug)
2567 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2568 args, from_tty);
2569 }
2570
2571 void
2572 target_disconnect (char *args, int from_tty)
2573 {
2574 struct target_ops *t;
2575
2576 /* If we're in breakpoints-always-inserted mode or if breakpoints
2577 are global across processes, we have to remove them before
2578 disconnecting. */
2579 remove_breakpoints ();
2580
2581 for (t = current_target.beneath; t != NULL; t = t->beneath)
2582 if (t->to_disconnect != NULL)
2583 {
2584 if (targetdebug)
2585 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2586 args, from_tty);
2587 t->to_disconnect (t, args, from_tty);
2588 return;
2589 }
2590
2591 tcomplain ();
2592 }
2593
2594 ptid_t
2595 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2596 {
2597 struct target_ops *t;
2598 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2599 status, options);
2600
2601 if (targetdebug)
2602 {
2603 char *status_string;
2604 char *options_string;
2605
2606 status_string = target_waitstatus_to_string (status);
2607 options_string = target_options_to_string (options);
2608 fprintf_unfiltered (gdb_stdlog,
2609 "target_wait (%d, status, options={%s})"
2610 " = %d, %s\n",
2611 ptid_get_pid (ptid), options_string,
2612 ptid_get_pid (retval), status_string);
2613 xfree (status_string);
2614 xfree (options_string);
2615 }
2616
2617 return retval;
2618 }
2619
2620 char *
2621 target_pid_to_str (ptid_t ptid)
2622 {
2623 struct target_ops *t;
2624
2625 for (t = current_target.beneath; t != NULL; t = t->beneath)
2626 {
2627 if (t->to_pid_to_str != NULL)
2628 return (*t->to_pid_to_str) (t, ptid);
2629 }
2630
2631 return normal_pid_to_str (ptid);
2632 }
2633
2634 char *
2635 target_thread_name (struct thread_info *info)
2636 {
2637 return current_target.to_thread_name (&current_target, info);
2638 }
2639
2640 void
2641 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2642 {
2643 struct target_ops *t;
2644
2645 target_dcache_invalidate ();
2646
2647 current_target.to_resume (&current_target, ptid, step, signal);
2648 if (targetdebug)
2649 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2650 ptid_get_pid (ptid),
2651 step ? "step" : "continue",
2652 gdb_signal_to_name (signal));
2653
2654 registers_changed_ptid (ptid);
2655 set_executing (ptid, 1);
2656 set_running (ptid, 1);
2657 clear_inline_frame_state (ptid);
2658 }
2659
2660 void
2661 target_pass_signals (int numsigs, unsigned char *pass_signals)
2662 {
2663 struct target_ops *t;
2664
2665 for (t = current_target.beneath; t != NULL; t = t->beneath)
2666 {
2667 if (t->to_pass_signals != NULL)
2668 {
2669 if (targetdebug)
2670 {
2671 int i;
2672
2673 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2674 numsigs);
2675
2676 for (i = 0; i < numsigs; i++)
2677 if (pass_signals[i])
2678 fprintf_unfiltered (gdb_stdlog, " %s",
2679 gdb_signal_to_name (i));
2680
2681 fprintf_unfiltered (gdb_stdlog, " })\n");
2682 }
2683
2684 (*t->to_pass_signals) (t, numsigs, pass_signals);
2685 return;
2686 }
2687 }
2688 }
2689
2690 void
2691 target_program_signals (int numsigs, unsigned char *program_signals)
2692 {
2693 struct target_ops *t;
2694
2695 for (t = current_target.beneath; t != NULL; t = t->beneath)
2696 {
2697 if (t->to_program_signals != NULL)
2698 {
2699 if (targetdebug)
2700 {
2701 int i;
2702
2703 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2704 numsigs);
2705
2706 for (i = 0; i < numsigs; i++)
2707 if (program_signals[i])
2708 fprintf_unfiltered (gdb_stdlog, " %s",
2709 gdb_signal_to_name (i));
2710
2711 fprintf_unfiltered (gdb_stdlog, " })\n");
2712 }
2713
2714 (*t->to_program_signals) (t, numsigs, program_signals);
2715 return;
2716 }
2717 }
2718 }
2719
2720 /* Look through the list of possible targets for a target that can
2721 follow forks. */
2722
2723 int
2724 target_follow_fork (int follow_child, int detach_fork)
2725 {
2726 struct target_ops *t;
2727
2728 for (t = current_target.beneath; t != NULL; t = t->beneath)
2729 {
2730 if (t->to_follow_fork != NULL)
2731 {
2732 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2733
2734 if (targetdebug)
2735 fprintf_unfiltered (gdb_stdlog,
2736 "target_follow_fork (%d, %d) = %d\n",
2737 follow_child, detach_fork, retval);
2738 return retval;
2739 }
2740 }
2741
2742 /* Some target returned a fork event, but did not know how to follow it. */
2743 internal_error (__FILE__, __LINE__,
2744 _("could not find a target to follow fork"));
2745 }
2746
2747 void
2748 target_mourn_inferior (void)
2749 {
2750 struct target_ops *t;
2751
2752 for (t = current_target.beneath; t != NULL; t = t->beneath)
2753 {
2754 if (t->to_mourn_inferior != NULL)
2755 {
2756 t->to_mourn_inferior (t);
2757 if (targetdebug)
2758 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2759
2760 /* We no longer need to keep handles on any of the object files.
2761 Make sure to release them to avoid unnecessarily locking any
2762 of them while we're not actually debugging. */
2763 bfd_cache_close_all ();
2764
2765 return;
2766 }
2767 }
2768
2769 internal_error (__FILE__, __LINE__,
2770 _("could not find a target to follow mourn inferior"));
2771 }
2772
2773 /* Look for a target which can describe architectural features, starting
2774 from TARGET. If we find one, return its description. */
2775
2776 const struct target_desc *
2777 target_read_description (struct target_ops *target)
2778 {
2779 struct target_ops *t;
2780
2781 for (t = target; t != NULL; t = t->beneath)
2782 if (t->to_read_description != NULL)
2783 {
2784 const struct target_desc *tdesc;
2785
2786 tdesc = t->to_read_description (t);
2787 if (tdesc)
2788 return tdesc;
2789 }
2790
2791 return NULL;
2792 }
2793
2794 /* The default implementation of to_search_memory.
2795 This implements a basic search of memory, reading target memory and
2796 performing the search here (as opposed to performing the search in on the
2797 target side with, for example, gdbserver). */
2798
int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* PATTERN_LEN - 1 extra bytes let a match straddle two chunks: the
     tail of each chunk is re-examined together with the head of the
     next one.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  /* START_ADDR always corresponds to search_buf[0], so the
	     match address is the base plus the offset in the buffer.  */
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  /* READ_ADDR is the first address not yet in the buffer.  */
	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2901
2902 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2903 sequence of bytes in PATTERN with length PATTERN_LEN.
2904
2905 The result is 1 if found, 0 if not found, and -1 if there was an error
2906 requiring halting of the search (e.g. memory read error).
2907 If the pattern is found the address is recorded in FOUND_ADDRP. */
2908
2909 int
2910 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2911 const gdb_byte *pattern, ULONGEST pattern_len,
2912 CORE_ADDR *found_addrp)
2913 {
2914 struct target_ops *t;
2915 int found;
2916
2917 /* We don't use INHERIT to set current_target.to_search_memory,
2918 so we have to scan the target stack and handle targetdebug
2919 ourselves. */
2920
2921 if (targetdebug)
2922 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2923 hex_string (start_addr));
2924
2925 for (t = current_target.beneath; t != NULL; t = t->beneath)
2926 if (t->to_search_memory != NULL)
2927 break;
2928
2929 if (t != NULL)
2930 {
2931 found = t->to_search_memory (t, start_addr, search_space_len,
2932 pattern, pattern_len, found_addrp);
2933 }
2934 else
2935 {
2936 /* If a special version of to_search_memory isn't available, use the
2937 simple version. */
2938 found = simple_search_memory (current_target.beneath,
2939 start_addr, search_space_len,
2940 pattern, pattern_len, found_addrp);
2941 }
2942
2943 if (targetdebug)
2944 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2945
2946 return found;
2947 }
2948
2949 /* Look through the currently pushed targets. If none of them will
2950 be able to restart the currently running process, issue an error
2951 message. */
2952
2953 void
2954 target_require_runnable (void)
2955 {
2956 struct target_ops *t;
2957
2958 for (t = target_stack; t != NULL; t = t->beneath)
2959 {
2960 /* If this target knows how to create a new program, then
2961 assume we will still be able to after killing the current
2962 one. Either killing and mourning will not pop T, or else
2963 find_default_run_target will find it again. */
2964 if (t->to_create_inferior != NULL)
2965 return;
2966
2967 /* Do not worry about thread_stratum targets that can not
2968 create inferiors. Assume they will be pushed again if
2969 necessary, and continue to the process_stratum. */
2970 if (t->to_stratum == thread_stratum
2971 || t->to_stratum == arch_stratum)
2972 continue;
2973
2974 error (_("The \"%s\" target does not support \"run\". "
2975 "Try \"help target\" or \"continue\"."),
2976 t->to_shortname);
2977 }
2978
2979 /* This function is only called if the target is running. In that
2980 case there should have been a process_stratum target and it
2981 should either know how to create inferiors, or not... */
2982 internal_error (__FILE__, __LINE__, _("No targets found"));
2983 }
2984
2985 /* Look through the list of possible targets for a target that can
2986 execute a run or attach command without any other data. This is
2987 used to locate the default process stratum.
2988
2989 If DO_MESG is not NULL, the result is always valid (error() is
2990 called for errors); else, return NULL on error. */
2991
2992 static struct target_ops *
2993 find_default_run_target (char *do_mesg)
2994 {
2995 struct target_ops **t;
2996 struct target_ops *runable = NULL;
2997 int count;
2998
2999 count = 0;
3000
3001 for (t = target_structs; t < target_structs + target_struct_size;
3002 ++t)
3003 {
3004 if ((*t)->to_can_run && target_can_run (*t))
3005 {
3006 runable = *t;
3007 ++count;
3008 }
3009 }
3010
3011 if (count != 1)
3012 {
3013 if (do_mesg)
3014 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3015 else
3016 return NULL;
3017 }
3018
3019 return runable;
3020 }
3021
3022 void
3023 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3024 {
3025 struct target_ops *t;
3026
3027 t = find_default_run_target ("attach");
3028 (t->to_attach) (t, args, from_tty);
3029 return;
3030 }
3031
3032 void
3033 find_default_create_inferior (struct target_ops *ops,
3034 char *exec_file, char *allargs, char **env,
3035 int from_tty)
3036 {
3037 struct target_ops *t;
3038
3039 t = find_default_run_target ("run");
3040 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3041 return;
3042 }
3043
3044 static int
3045 find_default_can_async_p (struct target_ops *ignore)
3046 {
3047 struct target_ops *t;
3048
3049 /* This may be called before the target is pushed on the stack;
3050 look for the default process stratum. If there's none, gdb isn't
3051 configured with a native debugger, and target remote isn't
3052 connected yet. */
3053 t = find_default_run_target (NULL);
3054 if (t && t->to_can_async_p != delegate_can_async_p)
3055 return (t->to_can_async_p) (t);
3056 return 0;
3057 }
3058
3059 static int
3060 find_default_is_async_p (struct target_ops *ignore)
3061 {
3062 struct target_ops *t;
3063
3064 /* This may be called before the target is pushed on the stack;
3065 look for the default process stratum. If there's none, gdb isn't
3066 configured with a native debugger, and target remote isn't
3067 connected yet. */
3068 t = find_default_run_target (NULL);
3069 if (t && t->to_is_async_p != delegate_is_async_p)
3070 return (t->to_is_async_p) (t);
3071 return 0;
3072 }
3073
3074 static int
3075 find_default_supports_non_stop (struct target_ops *self)
3076 {
3077 struct target_ops *t;
3078
3079 t = find_default_run_target (NULL);
3080 if (t && t->to_supports_non_stop)
3081 return (t->to_supports_non_stop) (t);
3082 return 0;
3083 }
3084
3085 int
3086 target_supports_non_stop (void)
3087 {
3088 struct target_ops *t;
3089
3090 for (t = &current_target; t != NULL; t = t->beneath)
3091 if (t->to_supports_non_stop)
3092 return t->to_supports_non_stop (t);
3093
3094 return 0;
3095 }
3096
3097 /* Implement the "info proc" command. */
3098
3099 int
3100 target_info_proc (char *args, enum info_proc_what what)
3101 {
3102 struct target_ops *t;
3103
3104 /* If we're already connected to something that can get us OS
3105 related data, use it. Otherwise, try using the native
3106 target. */
3107 if (current_target.to_stratum >= process_stratum)
3108 t = current_target.beneath;
3109 else
3110 t = find_default_run_target (NULL);
3111
3112 for (; t != NULL; t = t->beneath)
3113 {
3114 if (t->to_info_proc != NULL)
3115 {
3116 t->to_info_proc (t, args, what);
3117
3118 if (targetdebug)
3119 fprintf_unfiltered (gdb_stdlog,
3120 "target_info_proc (\"%s\", %d)\n", args, what);
3121
3122 return 1;
3123 }
3124 }
3125
3126 return 0;
3127 }
3128
3129 static int
3130 find_default_supports_disable_randomization (struct target_ops *self)
3131 {
3132 struct target_ops *t;
3133
3134 t = find_default_run_target (NULL);
3135 if (t && t->to_supports_disable_randomization)
3136 return (t->to_supports_disable_randomization) (t);
3137 return 0;
3138 }
3139
3140 int
3141 target_supports_disable_randomization (void)
3142 {
3143 struct target_ops *t;
3144
3145 for (t = &current_target; t != NULL; t = t->beneath)
3146 if (t->to_supports_disable_randomization)
3147 return t->to_supports_disable_randomization (t);
3148
3149 return 0;
3150 }
3151
3152 char *
3153 target_get_osdata (const char *type)
3154 {
3155 struct target_ops *t;
3156
3157 /* If we're already connected to something that can get us OS
3158 related data, use it. Otherwise, try using the native
3159 target. */
3160 if (current_target.to_stratum >= process_stratum)
3161 t = current_target.beneath;
3162 else
3163 t = find_default_run_target ("get OS data");
3164
3165 if (!t)
3166 return NULL;
3167
3168 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3169 }
3170
/* Determine the current address space of thread PTID.  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  /* Ask the targets on the stack, topmost first; the first one that
     implements to_thread_address_space answers for all.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
	{
	  aspace = t->to_thread_address_space (t, ptid);
	  gdb_assert (aspace);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_thread_address_space (%s) = %d\n",
				target_pid_to_str (ptid),
				address_space_num (aspace));
	  return aspace;
	}
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  /* A thread with no inferior (or an inferior with no address space)
     is an internal inconsistency, not a user error.  */
  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}
3207
3208
3209 /* Target file operations. */
3210
3211 static struct target_ops *
3212 default_fileio_target (void)
3213 {
3214 /* If we're already connected to something that can perform
3215 file I/O, use it. Otherwise, try using the native target. */
3216 if (current_target.to_stratum >= process_stratum)
3217 return current_target.beneath;
3218 else
3219 return find_default_run_target ("file I/O");
3220 }
3221
/* Open FILENAME on the target, using FLAGS and MODE.  Return a
   target file descriptor, or -1 if an error occurs (and set
   *TARGET_ERRNO).  Like all target_fileio_* routines below, this
   delegates to the first target on the stack that implements the
   method and fails with FILEIO_ENOSYS when none does.  */
int
target_fileio_open (const char *filename, int flags, int mode,
		    int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_open != NULL)
	{
	  int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
				filename, flags, mode,
				fd, fd != -1 ? 0 : *target_errno);
	  return fd;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Write up to LEN bytes from WRITE_BUF to FD on the target.
   Return the number of bytes written, or -1 if an error occurs
   (and set *TARGET_ERRNO).  OFFSET is the file position to write at
   (pwrite semantics; the descriptor's own position is not used).  */
int
target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
		      ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pwrite != NULL)
	{
	  int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
					 target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pwrite (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Read up to LEN bytes FD on the target into READ_BUF.
   Return the number of bytes read, or -1 if an error occurs
   (and set *TARGET_ERRNO).  OFFSET is the file position to read from
   (pread semantics).  A return of 0 means end-of-file.  */
int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
		     ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pread != NULL)
	{
	  int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
					target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pread (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Close FD on the target.  Return 0, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_close (int fd, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_close != NULL)
	{
	  int ret = t->to_fileio_close (t, fd, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_close (%d) = %d (%d)\n",
				fd, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Unlink FILENAME on the target.  Return 0, or -1 if an error
   occurs (and set *TARGET_ERRNO).  */
int
target_fileio_unlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_unlink != NULL)
	{
	  int ret = t->to_fileio_unlink (t, filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_unlink (%s) = %d (%d)\n",
				filename, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Read value of symbolic link FILENAME on the target.  Return a
   null-terminated string allocated via xmalloc, or NULL if an error
   occurs (and set *TARGET_ERRNO).  The caller owns and must free
   the returned string.  */
char *
target_fileio_readlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_readlink != NULL)
	{
	  char *ret = t->to_fileio_readlink (t, filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_readlink (%s) = %s (%d)\n",
				filename, ret? ret : "(nil)",
				ret? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return NULL;
}
3386
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE.  Any close error is deliberately discarded -- this runs on
   error paths where there is nothing useful to do with it.  */
static void
target_fileio_close_cleanup (void *opaque)
{
  int fd = *(int *) opaque;
  int target_errno;

  target_fileio_close (fd, &target_errno);
}
3395
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.

   On success with a non-empty file, ownership of the buffer passes to
   the caller via *BUF_P.  On error, or when the file is empty, the
   buffer is freed here and *BUF_P is left untouched.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Make sure the descriptor is closed on every exit path below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Reserve PADDING bytes at the end of the buffer so the caller
	 can, e.g., append a terminating NUL.  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      /* Let the user interrupt a long transfer.  */
      QUIT;
    }
}
3459
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* No padding requested: *BUF_P will hold exactly the bytes read.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3469
/* Read target file FILENAME.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

char *
target_fileio_read_stralloc (const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Request one byte of padding for the terminating NUL below.  */
  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  /* For an empty file BUFFER was never set; return a fresh empty
     string instead.  */
  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  return bufstr;
}
3506
3507
/* Default to_region_ok_for_hw_watchpoint: accept a region no wider
   than a pointer, i.e. what a single hardware debug register can
   typically cover.  */
static int
default_region_ok_for_hw_watchpoint (struct target_ops *self,
				     CORE_ADDR addr, int len)
{
  return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
}

/* Default to_watchpoint_addr_within_range: plain half-open interval
   test, ADDR in [START, START + LENGTH).  */
static int
default_watchpoint_addr_within_range (struct target_ops *target,
				      CORE_ADDR addr,
				      CORE_ADDR start, int length)
{
  return addr >= start && addr < start + length;
}

/* Default to_thread_architecture: every thread shares the inferior's
   global architecture.  */
static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}
3528
/* Trivial stubs used to populate default target-vector slots.
   NOTE(review): init_dummy_target installs these through casts to
   incompatible function pointer types; calling through such a cast is
   formally undefined behavior, although it works on the ABIs GDB
   supports.  */

static int
return_zero (void)
{
  return 0;
}

static int
return_minus_one (void)
{
  return -1;
}

static void *
return_null (void)
{
  return 0;
}
3546
/*
 * Find the next target down the stack from the specified target.
 */

struct target_ops *
find_target_beneath (struct target_ops *t)
{
  return t->beneath;
}

/* See target.h.  Return the first target on the stack occupying
   STRATUM, or NULL when no pushed target sits at that stratum.  */

struct target_ops *
find_target_at (enum strata stratum)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_stratum == stratum)
      return t;

  return NULL;
}
3570
3571 \f
/* The inferior process has died.  Long live the inferior!  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Clear inferior_ptid first; the teardown below must not run with
     a stale "current thread".  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3606 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer -- not reentrant; each call overwrites the result of
   the previous one.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}

/* Default to_pid_to_str implementation for the dummy target.  */

static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3624
/* Error-catcher for target_find_memory_regions.  Always throws; the
   return statement only pacifies the compiler.  */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  return 0;
}

/* Error-catcher for target_make_corefile_notes.  Always throws.  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  return NULL;
}
3642
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target sits at the bottom of every target stack,
   so these defaults apply whenever nothing above implements a
   method.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  /* The casts below adapt the zero-argument stubs to the slots'
     prototypes; see the note at return_zero.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining slot with a generated default method.  */
  install_dummy_methods (&dummy_target);
}
3668 \f
/* Debug wrapper for to_open: forward to the real target, then log the
   call.  */
static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}

/* Close TARG.  A target must not be closed while still pushed on the
   target stack.  Prefers to_xclose (which also frees TARG) over
   to_close.  */
void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}

/* Attach to the process specified by ARGS (typically a pid), as for
   the "attach" command.  */
void
target_attach (char *args, int from_tty)
{
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
			args, from_tty);
}
3699
/* Return non-zero if thread PTID is still alive on the target, zero
   otherwise (including when no target implements the query).  */
int
target_thread_alive (ptid_t ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_alive != NULL)
	{
	  int retval;

	  retval = t->to_thread_alive (t, ptid);
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
				ptid_get_pid (ptid), retval);

	  return retval;
	}
    }

  return 0;
}

/* Ask the target to update GDB's thread list.  A no-op when no target
   on the stack implements to_find_new_threads.  */
void
target_find_new_threads (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_find_new_threads != NULL)
	{
	  t->to_find_new_threads (t);
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");

	  return;
	}
    }
}
3740
/* Ask the target to stop thread(s) PTID.  Respects the "may-stop"
   user setting: when stopping is disallowed, warn and do nothing.  */
void
target_stop (ptid_t ptid)
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  (*current_target.to_stop) (&current_target, ptid);
}

/* Debug wrapper for to_post_attach: forward, then log.  */
static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3760
/* Concatenate ELEM to LIST, a comma separate list, and return the
   result.  The LIST incoming argument is released.  The returned
   string is freshly allocated and owned by the caller.  */

static char *
str_comma_list_concat_elem (char *list, const char *elem)
{
  if (list == NULL)
    return xstrdup (elem);
  else
    return reconcat (list, list, ", ", elem, (char *) NULL);
}

/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  if ((*target_options & opt) != 0)
    {
      ret = str_comma_list_concat_elem (ret, opt_str);
      *target_options &= ~opt;
    }

  return ret;
}

/* Return a human-readable, comma-separated rendering of
   TARGET_OPTIONS.  Unrecognized leftover bits (do_option clears each
   bit it names) show up as "unknown???".  Caller frees the result.  */
char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3808
/* Log one register access to gdb_stdlog on behalf of FUNC: register
   name (or number), raw bytes, and -- when it fits in a LONGEST --
   the value as address and decimal.  Used by the fetch/store
   wrappers below.  */
static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic name when REGNO names a real, named raw
     register; otherwise fall back to the number.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* Only registers narrow enough to decode get the numeric
	 rendering.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3845
/* Fetch register REGNO (or all registers, if REGNO is -1) from the
   target into REGCACHE.  Silently does nothing if no target
   implements to_fetch_registers.  */
void
target_fetch_registers (struct regcache *regcache, int regno)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_fetch_registers != NULL)
	{
	  t->to_fetch_registers (t, regcache, regno);
	  if (targetdebug)
	    debug_print_register ("target_fetch_registers", regcache, regno);
	  return;
	}
    }
}

/* Store register REGNO (or all registers, if REGNO is -1) from
   REGCACHE back to the target.  Errors out when the "may-write-registers"
   setting forbids it.  */
void
target_store_registers (struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!may_write_registers)
    error (_("Writing to registers is not allowed (regno %d)"), regno);

  current_target.to_store_registers (&current_target, regcache, regno);
  if (targetdebug)
    {
      debug_print_register ("target_store_registers", regcache, regno);
    }
}
3877
/* Return the CPU core thread PTID last ran on, or -1 when no target
   can tell.  */
int
target_core_of_thread (ptid_t ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_core_of_thread != NULL)
	{
	  int retval = t->to_core_of_thread (t, ptid);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_core_of_thread (%d) = %d\n",
				ptid_get_pid (ptid), retval);
	  return retval;
	}
    }

  return -1;
}

/* Compare SIZE bytes of target memory at MEMADDR against DATA;
   return non-zero when they match.  Throws (via tcomplain, which
   calls error and does not return) when no target implements the
   method, so control never falls off the end.  */
int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_verify_memory != NULL)
	{
	  int retval = t->to_verify_memory (t, data, memaddr, size);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_verify_memory (%s, %s) = %d\n",
				paddress (target_gdbarch (), memaddr),
				pulongest (size),
				retval);
	  return retval;
	}
    }

  tcomplain ();
}
3923
/* The documentation for this function is in its prototype declaration in
   target.h.  Returns 0 on success; non-zero (1 here, when
   unsupported) on failure.  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insert_mask_watchpoint != NULL)
      {
	int ret;

	ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);

	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "\
target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
			      core_addr_to_string (addr),
			      core_addr_to_string (mask), rw, ret);

	return ret;
      }

  return 1;
}

/* The documentation for this function is in its prototype declaration in
   target.h.  Mirror of target_insert_mask_watchpoint above.  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_remove_mask_watchpoint != NULL)
      {
	int ret;

	ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);

	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "\
target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
			      core_addr_to_string (addr),
			      core_addr_to_string (mask), rw, ret);

	return ret;
      }

  return 1;
}
3977
/* The documentation for this function is in its prototype declaration
   in target.h.  Returns -1 when no target supports masked
   watchpoints.  */

int
target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_masked_watch_num_registers != NULL)
      return t->to_masked_watch_num_registers (t, addr, mask);

  return -1;
}

/* The documentation for this function is in its prototype declaration
   in target.h.  Returns -1 when no target supports ranged
   breakpoints.  */

int
target_ranged_break_num_registers (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_ranged_break_num_registers != NULL)
      return t->to_ranged_break_num_registers (t);

  return -1;
}
4007
/* See target.h.  Enable branch tracing for PTID; throws when no
   target supports btrace.  */

struct btrace_target_info *
target_enable_btrace (ptid_t ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_enable_btrace != NULL)
      return t->to_enable_btrace (t, ptid);

  tcomplain ();
  return NULL;
}

/* See target.h.  Disable branch tracing described by BTINFO; throws
   when unsupported.  */

void
target_disable_btrace (struct btrace_target_info *btinfo)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_disable_btrace != NULL)
      {
	t->to_disable_btrace (t, btinfo);
	return;
      }

  tcomplain ();
}

/* See target.h.  Tear down branch tracing state without trying to
   talk to a (possibly gone) target thread.  */

void
target_teardown_btrace (struct btrace_target_info *btinfo)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_teardown_btrace != NULL)
      {
	t->to_teardown_btrace (t, btinfo);
	return;
      }

  tcomplain ();
}

/* See target.h.  Read branch trace data into BTRACE; TYPE selects
   delta vs. full reads.  */

enum btrace_error
target_read_btrace (VEC (btrace_block_s) **btrace,
		    struct btrace_target_info *btinfo,
		    enum btrace_read_type type)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_read_btrace != NULL)
      return t->to_read_btrace (t, btrace, btinfo, type);

  tcomplain ();
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See target.h.  Stop any active recording; quietly a no-op when
   nothing records.  */

void
target_stop_recording (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_stop_recording != NULL)
      {
	t->to_stop_recording (t);
	return;
      }

  /* This is optional.  */
}

/* See target.h.  Print information about the current recording;
   throws when no record target is pushed.  */

void
target_info_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_info_record != NULL)
      {
	t->to_info_record (t);
	return;
      }

  tcomplain ();
}

/* See target.h.  Save the recorded execution to FILENAME; throws
   when unsupported.  */

void
target_save_record (const char *filename)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_save_record != NULL)
      {
	t->to_save_record (t, filename);
	return;
      }

  tcomplain ();
}

/* See target.h.  Capability query only -- does not actually delete
   anything.  */

int
target_supports_delete_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_delete_record != NULL)
      return 1;

  return 0;
}

/* See target.h.  Delete the recorded execution trace; throws when
   unsupported.  */

void
target_delete_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_delete_record != NULL)
      {
	t->to_delete_record (t);
	return;
      }

  tcomplain ();
}

/* See target.h.  Return non-zero while replaying recorded execution
   (as opposed to recording live).  */

int
target_record_is_replaying (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_record_is_replaying != NULL)
      return t->to_record_is_replaying (t);

  return 0;
}

/* See target.h.  Rewind replay position to the start of the
   recording; throws when unsupported.  */

void
target_goto_record_begin (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record_begin != NULL)
      {
	t->to_goto_record_begin (t);
	return;
      }

  tcomplain ();
}

/* See target.h.  Wind replay position forward to the end of the
   recording; throws when unsupported.  */

void
target_goto_record_end (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record_end != NULL)
      {
	t->to_goto_record_end (t);
	return;
      }

  tcomplain ();
}

/* See target.h.  Jump replay position to instruction number INSN;
   throws when unsupported.  */

void
target_goto_record (ULONGEST insn)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record != NULL)
      {
	t->to_goto_record (t, insn);
	return;
      }

  tcomplain ();
}

/* See target.h.  Disassemble SIZE instructions around the current
   position; throws when unsupported.  */

void
target_insn_history (int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history != NULL)
      {
	t->to_insn_history (t, size, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  Disassemble SIZE instructions starting at FROM;
   throws when unsupported.  */

void
target_insn_history_from (ULONGEST from, int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history_from != NULL)
      {
	t->to_insn_history_from (t, from, size, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  Disassemble recorded instructions BEGIN through END;
   throws when unsupported.  */

void
target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history_range != NULL)
      {
	t->to_insn_history_range (t, begin, end, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  Print SIZE recorded function-call segments around
   the current position; throws when unsupported.  */

void
target_call_history (int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history != NULL)
      {
	t->to_call_history (t, size, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  Print SIZE recorded function-call segments starting
   at BEGIN; throws when unsupported.  */

void
target_call_history_from (ULONGEST begin, int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history_from != NULL)
      {
	t->to_call_history_from (t, begin, size, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  Print recorded function-call segments BEGIN through
   END; throws when unsupported.  */

void
target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history_range != NULL)
      {
	t->to_call_history_range (t, begin, end, flags);
	return;
      }

  tcomplain ();
}
4322
/* Debug wrapper for to_prepare_to_store: forward, then log.  */
static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
4330
/* See target.h.  Return the first target-supplied frame unwinder, or
   NULL to let the architecture's unwinders apply.  */

const struct frame_unwind *
target_get_unwinder (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_get_unwinder != NULL)
      return t->to_get_unwinder;

  return NULL;
}

/* See target.h.  Like target_get_unwinder, but for tail-call
   frames.  */

const struct frame_unwind *
target_get_tailcall_unwinder (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_get_tailcall_unwinder != NULL)
      return t->to_get_tailcall_unwinder;

  return NULL;
}
4358
/* See target.h.  Walk the stack from OPS down looking for a
   to_decr_pc_after_break override; fall back to the gdbarch
   value.  */

CORE_ADDR
forward_target_decr_pc_after_break (struct target_ops *ops,
				    struct gdbarch *gdbarch)
{
  for (; ops != NULL; ops = ops->beneath)
    if (ops->to_decr_pc_after_break != NULL)
      return ops->to_decr_pc_after_break (ops, gdbarch);

  return gdbarch_decr_pc_after_break (gdbarch);
}

/* See target.h.  Convenience entry point starting at the top of the
   current stack.  */

CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
4379
/* Debug wrapper for the deprecated_xfer_memory method: forward the
   transfer, then log the request and (up to one line of) the bytes
   moved.  */
static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the dump at 16-byte-aligned buffer addresses; with
	     "set debug target" below 2, stop after the first line.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4420
/* Debug wrapper for to_files_info: forward, then log.  */
static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4428
/* Debug wrapper for to_insert_breakpoint: forward, then log the
   placed address and result.  */
static int
debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}

/* Debug wrapper for to_remove_breakpoint: forward, then log.  */
static int
debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4458
4459 static int
4460 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4461 int type, int cnt, int from_tty)
4462 {
4463 int retval;
4464
4465 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4466 type, cnt, from_tty);
4467
4468 fprintf_unfiltered (gdb_stdlog,
4469 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4470 (unsigned long) type,
4471 (unsigned long) cnt,
4472 (unsigned long) from_tty,
4473 (unsigned long) retval);
4474 return retval;
4475 }
4476
4477 static int
4478 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4479 CORE_ADDR addr, int len)
4480 {
4481 CORE_ADDR retval;
4482
4483 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4484 addr, len);
4485
4486 fprintf_unfiltered (gdb_stdlog,
4487 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4488 core_addr_to_string (addr), (unsigned long) len,
4489 core_addr_to_string (retval));
4490 return retval;
4491 }
4492
4493 static int
4494 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4495 CORE_ADDR addr, int len, int rw,
4496 struct expression *cond)
4497 {
4498 int retval;
4499
4500 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4501 addr, len,
4502 rw, cond);
4503
4504 fprintf_unfiltered (gdb_stdlog,
4505 "target_can_accel_watchpoint_condition "
4506 "(%s, %d, %d, %s) = %ld\n",
4507 core_addr_to_string (addr), len, rw,
4508 host_address_to_string (cond), (unsigned long) retval);
4509 return retval;
4510 }
4511
4512 static int
4513 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4514 {
4515 int retval;
4516
4517 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4518
4519 fprintf_unfiltered (gdb_stdlog,
4520 "target_stopped_by_watchpoint () = %ld\n",
4521 (unsigned long) retval);
4522 return retval;
4523 }
4524
4525 static int
4526 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4527 {
4528 int retval;
4529
4530 retval = debug_target.to_stopped_data_address (target, addr);
4531
4532 fprintf_unfiltered (gdb_stdlog,
4533 "target_stopped_data_address ([%s]) = %ld\n",
4534 core_addr_to_string (*addr),
4535 (unsigned long)retval);
4536 return retval;
4537 }
4538
4539 static int
4540 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4541 CORE_ADDR addr,
4542 CORE_ADDR start, int length)
4543 {
4544 int retval;
4545
4546 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4547 start, length);
4548
4549 fprintf_filtered (gdb_stdlog,
4550 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4551 core_addr_to_string (addr), core_addr_to_string (start),
4552 length, retval);
4553 return retval;
4554 }
4555
4556 static int
4557 debug_to_insert_hw_breakpoint (struct target_ops *self,
4558 struct gdbarch *gdbarch,
4559 struct bp_target_info *bp_tgt)
4560 {
4561 int retval;
4562
4563 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4564 gdbarch, bp_tgt);
4565
4566 fprintf_unfiltered (gdb_stdlog,
4567 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4568 core_addr_to_string (bp_tgt->placed_address),
4569 (unsigned long) retval);
4570 return retval;
4571 }
4572
4573 static int
4574 debug_to_remove_hw_breakpoint (struct target_ops *self,
4575 struct gdbarch *gdbarch,
4576 struct bp_target_info *bp_tgt)
4577 {
4578 int retval;
4579
4580 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4581 gdbarch, bp_tgt);
4582
4583 fprintf_unfiltered (gdb_stdlog,
4584 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4585 core_addr_to_string (bp_tgt->placed_address),
4586 (unsigned long) retval);
4587 return retval;
4588 }
4589
4590 static int
4591 debug_to_insert_watchpoint (struct target_ops *self,
4592 CORE_ADDR addr, int len, int type,
4593 struct expression *cond)
4594 {
4595 int retval;
4596
4597 retval = debug_target.to_insert_watchpoint (&debug_target,
4598 addr, len, type, cond);
4599
4600 fprintf_unfiltered (gdb_stdlog,
4601 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4602 core_addr_to_string (addr), len, type,
4603 host_address_to_string (cond), (unsigned long) retval);
4604 return retval;
4605 }
4606
4607 static int
4608 debug_to_remove_watchpoint (struct target_ops *self,
4609 CORE_ADDR addr, int len, int type,
4610 struct expression *cond)
4611 {
4612 int retval;
4613
4614 retval = debug_target.to_remove_watchpoint (&debug_target,
4615 addr, len, type, cond);
4616
4617 fprintf_unfiltered (gdb_stdlog,
4618 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4619 core_addr_to_string (addr), len, type,
4620 host_address_to_string (cond), (unsigned long) retval);
4621 return retval;
4622 }
4623
/* Debug wrapper for to_terminal_init: delegate to the saved target
   vector and log the call.  */

static void
debug_to_terminal_init (struct target_ops *self)
{
  debug_target.to_terminal_init (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
4631
/* Debug wrapper for to_terminal_inferior: delegate to the saved
   target vector and log the call.  */

static void
debug_to_terminal_inferior (struct target_ops *self)
{
  debug_target.to_terminal_inferior (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
4639
/* Debug wrapper for to_terminal_ours_for_output: delegate to the
   saved target vector and log the call.  */

static void
debug_to_terminal_ours_for_output (struct target_ops *self)
{
  debug_target.to_terminal_ours_for_output (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
4647
/* Debug wrapper for to_terminal_ours: delegate to the saved target
   vector and log the call.  */

static void
debug_to_terminal_ours (struct target_ops *self)
{
  debug_target.to_terminal_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
4655
/* Debug wrapper for to_terminal_save_ours: delegate to the saved
   target vector and log the call.  */

static void
debug_to_terminal_save_ours (struct target_ops *self)
{
  debug_target.to_terminal_save_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
4663
/* Debug wrapper for to_terminal_info: delegate to the saved target
   vector and log the arguments.  NOTE(review): ARG is logged with %s
   and is presumably non-NULL here — confirm against callers.  */

static void
debug_to_terminal_info (struct target_ops *self,
			const char *arg, int from_tty)
{
  debug_target.to_terminal_info (&debug_target, arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
4673
/* Debug wrapper for to_load: delegate to the saved target vector and
   log the arguments.  */

static void
debug_to_load (struct target_ops *self, char *args, int from_tty)
{
  debug_target.to_load (&debug_target, args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4681
/* Debug wrapper for to_post_startup_inferior: delegate to the saved
   target vector and log the pid of PTID.  */

static void
debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_post_startup_inferior (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      ptid_get_pid (ptid));
}
4690
4691 static int
4692 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4693 {
4694 int retval;
4695
4696 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4697
4698 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4699 pid, retval);
4700
4701 return retval;
4702 }
4703
4704 static int
4705 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4706 {
4707 int retval;
4708
4709 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4710
4711 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4712 pid, retval);
4713
4714 return retval;
4715 }
4716
4717 static int
4718 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4719 {
4720 int retval;
4721
4722 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4723
4724 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4725 pid, retval);
4726
4727 return retval;
4728 }
4729
4730 static int
4731 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4732 {
4733 int retval;
4734
4735 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4736
4737 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4738 pid, retval);
4739
4740 return retval;
4741 }
4742
4743 static int
4744 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4745 {
4746 int retval;
4747
4748 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4749
4750 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4751 pid, retval);
4752
4753 return retval;
4754 }
4755
4756 static int
4757 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4758 {
4759 int retval;
4760
4761 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4762
4763 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4764 pid, retval);
4765
4766 return retval;
4767 }
4768
4769 static int
4770 debug_to_has_exited (struct target_ops *self,
4771 int pid, int wait_status, int *exit_status)
4772 {
4773 int has_exited;
4774
4775 has_exited = debug_target.to_has_exited (&debug_target,
4776 pid, wait_status, exit_status);
4777
4778 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4779 pid, wait_status, *exit_status, has_exited);
4780
4781 return has_exited;
4782 }
4783
4784 static int
4785 debug_to_can_run (struct target_ops *self)
4786 {
4787 int retval;
4788
4789 retval = debug_target.to_can_run (&debug_target);
4790
4791 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4792
4793 return retval;
4794 }
4795
4796 static struct gdbarch *
4797 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4798 {
4799 struct gdbarch *retval;
4800
4801 retval = debug_target.to_thread_architecture (ops, ptid);
4802
4803 fprintf_unfiltered (gdb_stdlog,
4804 "target_thread_architecture (%s) = %s [%s]\n",
4805 target_pid_to_str (ptid),
4806 host_address_to_string (retval),
4807 gdbarch_bfd_arch_info (retval)->printable_name);
4808 return retval;
4809 }
4810
/* Debug wrapper for to_stop: delegate to the saved target vector and
   log the ptid being stopped.  */

static void
debug_to_stop (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_stop (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
4819
/* Debug wrapper for to_rcmd: delegate to the saved target vector and
   log the command string (output stream contents are not logged).  */

static void
debug_to_rcmd (struct target_ops *self, char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (&debug_target, command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
4827
4828 static char *
4829 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4830 {
4831 char *exec_file;
4832
4833 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4834
4835 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4836 pid, exec_file);
4837
4838 return exec_file;
4839 }
4840
/* Install the debug wrappers: save a copy of the current target
   vector in DEBUG_TARGET, then overwrite selected methods of
   CURRENT_TARGET with debug_to_* wrappers that log each call to
   gdb_stdlog before delegating to the saved copy.  Called when
   "set debug target" is enabled.  */

static void
setup_target_debug (void)
{
  /* Snapshot the unwrapped vector; the wrappers delegate through it.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4888 \f
4889
/* Help text shared by the "info target" and "info files" commands.  */
static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4894
/* Default implementation of to_rcmd: this target has no remote
   monitor, so any "monitor" command is an error.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4900
/* Implement the "monitor" command: forward CMD verbatim to the
   target's rcmd method, with output going to gdb_stdtarg.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
4907
4908 /* Print the name of each layers of our target stack. */
4909
4910 static void
4911 maintenance_print_target_stack (char *cmd, int from_tty)
4912 {
4913 struct target_ops *t;
4914
4915 printf_filtered (_("The current target stack is:\n"));
4916
4917 for (t = target_stack; t != NULL; t = t->beneath)
4918 {
4919 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4920 }
4921 }
4922
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated.  */
static int target_async_permitted_1 = 0;
4929
4930 static void
4931 set_target_async_command (char *args, int from_tty,
4932 struct cmd_list_element *c)
4933 {
4934 if (have_live_inferiors ())
4935 {
4936 target_async_permitted_1 = target_async_permitted;
4937 error (_("Cannot change this setting while the inferior is running."));
4938 }
4939
4940 target_async_permitted = target_async_permitted_1;
4941 }
4942
/* Callback for "show target-async": print the current setting.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4952
/* Temporary copies of permission settings.  The "set may-*" commands
   write here; the values are copied to the real may_* globals only
   when it is safe to do so (see set_target_permissions).  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4961
/* Make the user-set values match the real values again.  Used to
   revert the "_1" shadow copies after a rejected "set" command.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4974
/* The one function handles (most of) the permission flags in the same
   way.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the shadow copies before erroring, so "show" output
	 still matches the effective settings.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  Note that
     may_write_memory is deliberately not handled here; it has its own
     setter (set_write_memory_permission) independent of execution
     state.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
4996
/* Set memory write permission independently of observer mode.  */

static void
set_write_memory_permission (char *args, int from_tty,
			struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
5007
5008
/* Module initialization: push the dummy target as the bottom of the
   target stack and register all target-related commands ("info
   target", "monitor", debug and permission set/show commands).  */

void
initialize_targets (void)
{
  /* The dummy target sits at the bottom of the stack so there is
     always a target to delegate to.  */
  init_dummy_target ();
  push_target (&dummy_target);

  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose.  Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
           _("Print the name of each layer of the internal target stack."),
           &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The "may-*" commands write to the _1 shadow variables; the real
     flags are updated by the set callbacks when it is safe.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}