Return early in target_xfer_partial when LEN is zero.
gdb/target.c (binutils-gdb.git)
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
56
57 static void tcomplain (void) ATTRIBUTE_NORETURN;
58
59 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
60
61 static int return_zero (void);
62
63 static int return_one (void);
64
65 static int return_minus_one (void);
66
67 static void *return_null (void);
68
69 void target_ignore (void);
70
71 static void target_command (char *, int);
72
73 static struct target_ops *find_default_run_target (char *);
74
75 static target_xfer_partial_ftype default_xfer_partial;
76
77 static target_xfer_partial_ftype current_xfer_partial;
78
79 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
80 ptid_t ptid);
81
82 static void init_dummy_target (void);
83
84 static struct target_ops debug_target;
85
86 static void debug_to_open (char *, int);
87
88 static void debug_to_prepare_to_store (struct target_ops *self,
89 struct regcache *);
90
91 static void debug_to_files_info (struct target_ops *);
92
93 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
94 struct bp_target_info *);
95
96 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
97 struct bp_target_info *);
98
99 static int debug_to_can_use_hw_breakpoint (int, int, int);
100
101 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
102 struct bp_target_info *);
103
104 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
105 struct bp_target_info *);
106
107 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
108 struct expression *);
109
110 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
111 struct expression *);
112
113 static int debug_to_stopped_by_watchpoint (void);
114
115 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
116
117 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
118 CORE_ADDR, CORE_ADDR, int);
119
120 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
121
122 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
123 struct expression *);
124
125 static void debug_to_terminal_init (void);
126
127 static void debug_to_terminal_inferior (void);
128
129 static void debug_to_terminal_ours_for_output (void);
130
131 static void debug_to_terminal_save_ours (void);
132
133 static void debug_to_terminal_ours (void);
134
135 static void debug_to_load (char *, int);
136
137 static int debug_to_can_run (void);
138
139 static void debug_to_stop (ptid_t);
140
141 /* Pointer to array of target architecture structures; the number of
142 entries currently in use; and the allocated size of the
143 array. */
144 struct target_ops **target_structs;
145 unsigned target_struct_size;
146 unsigned target_struct_allocsize;
147 #define DEFAULT_ALLOCSIZE 10
148
149 /* The initial current target, so that there is always a semi-valid
150 current target. */
151
152 static struct target_ops dummy_target;
153
154 /* Top of target stack. */
155
156 static struct target_ops *target_stack;
157
158 /* The target structure we are currently using to talk to a process
159 or file or whatever "inferior" we have. */
160
161 struct target_ops current_target;
162
163 /* Command list for target. */
164
165 static struct cmd_list_element *targetlist = NULL;
166
167 /* Nonzero if we should trust readonly sections from the
168 executable when reading memory. */
169
170 static int trust_readonly = 0;
171
172 /* Nonzero if we should show true memory content including
173 memory breakpoints inserted by GDB. */
174
175 static int show_memory_breakpoints = 0;
176
177 /* These globals control whether GDB attempts to perform these
178 operations; they are useful for targets that need to prevent
179 inadvertent disruption, such as in non-stop mode. */
180
181 int may_write_registers = 1;
182
183 int may_write_memory = 1;
184
185 int may_insert_breakpoints = 1;
186
187 int may_insert_tracepoints = 1;
188
189 int may_insert_fast_tracepoints = 1;
190
191 int may_stop = 1;
192
193 /* Non-zero if we want to see a trace of target-level operations. */
194
195 static unsigned int targetdebug = 0;
196 static void
197 show_targetdebug (struct ui_file *file, int from_tty,
198 struct cmd_list_element *c, const char *value)
199 {
200 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
201 }
202
203 static void setup_target_debug (void);
204
205 /* The user just typed 'target' without the name of a target. */
206
207 static void
208 target_command (char *arg, int from_tty)
209 {
210 fputs_filtered ("Argument required (target name). Try `help target'\n",
211 gdb_stdout);
212 }
213
214 /* Default target_has_* methods for process_stratum targets. */
215
216 int
217 default_child_has_all_memory (struct target_ops *ops)
218 {
219 /* If no inferior selected, then we can't read memory here. */
220 if (ptid_equal (inferior_ptid, null_ptid))
221 return 0;
222
223 return 1;
224 }
225
226 int
227 default_child_has_memory (struct target_ops *ops)
228 {
229 /* If no inferior selected, then we can't read memory here. */
230 if (ptid_equal (inferior_ptid, null_ptid))
231 return 0;
232
233 return 1;
234 }
235
236 int
237 default_child_has_stack (struct target_ops *ops)
238 {
239 /* If no inferior selected, there's no stack. */
240 if (ptid_equal (inferior_ptid, null_ptid))
241 return 0;
242
243 return 1;
244 }
245
246 int
247 default_child_has_registers (struct target_ops *ops)
248 {
249 /* Can't read registers from no inferior. */
250 if (ptid_equal (inferior_ptid, null_ptid))
251 return 0;
252
253 return 1;
254 }
255
256 int
257 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
258 {
259 /* If there's no thread selected, then we can't make it run through
260 hoops. */
261 if (ptid_equal (the_ptid, null_ptid))
262 return 0;
263
264 return 1;
265 }
266
267
268 int
269 target_has_all_memory_1 (void)
270 {
271 struct target_ops *t;
272
273 for (t = current_target.beneath; t != NULL; t = t->beneath)
274 if (t->to_has_all_memory (t))
275 return 1;
276
277 return 0;
278 }
279
280 int
281 target_has_memory_1 (void)
282 {
283 struct target_ops *t;
284
285 for (t = current_target.beneath; t != NULL; t = t->beneath)
286 if (t->to_has_memory (t))
287 return 1;
288
289 return 0;
290 }
291
292 int
293 target_has_stack_1 (void)
294 {
295 struct target_ops *t;
296
297 for (t = current_target.beneath; t != NULL; t = t->beneath)
298 if (t->to_has_stack (t))
299 return 1;
300
301 return 0;
302 }
303
304 int
305 target_has_registers_1 (void)
306 {
307 struct target_ops *t;
308
309 for (t = current_target.beneath; t != NULL; t = t->beneath)
310 if (t->to_has_registers (t))
311 return 1;
312
313 return 0;
314 }
315
316 int
317 target_has_execution_1 (ptid_t the_ptid)
318 {
319 struct target_ops *t;
320
321 for (t = current_target.beneath; t != NULL; t = t->beneath)
322 if (t->to_has_execution (t, the_ptid))
323 return 1;
324
325 return 0;
326 }
327
328 int
329 target_has_execution_current (void)
330 {
331 return target_has_execution_1 (inferior_ptid);
332 }
333
334 /* Complete initialization of T. This ensures that various fields in
335 T are set, if needed by the target implementation. */
336
337 void
338 complete_target_initialization (struct target_ops *t)
339 {
340 /* Provide default values for all "must have" methods. */
341 if (t->to_xfer_partial == NULL)
342 t->to_xfer_partial = default_xfer_partial;
343
344 if (t->to_has_all_memory == NULL)
345 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
346
347 if (t->to_has_memory == NULL)
348 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
349
350 if (t->to_has_stack == NULL)
351 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
352
353 if (t->to_has_registers == NULL)
354 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
355
356 if (t->to_has_execution == NULL)
357 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
358 }
359
360 /* Add possible target architecture T to the list and add a new
361 command 'target T->to_shortname'. Set COMPLETER as the command's
362 completer if not NULL. */
363
364 void
365 add_target_with_completer (struct target_ops *t,
366 completer_ftype *completer)
367 {
368 struct cmd_list_element *c;
369
370 complete_target_initialization (t);
371
372 if (!target_structs)
373 {
374 target_struct_allocsize = DEFAULT_ALLOCSIZE;
375 target_structs = (struct target_ops **) xmalloc
376 (target_struct_allocsize * sizeof (*target_structs));
377 }
378 if (target_struct_size >= target_struct_allocsize)
379 {
380 target_struct_allocsize *= 2;
381 target_structs = (struct target_ops **)
382 xrealloc ((char *) target_structs,
383 target_struct_allocsize * sizeof (*target_structs));
384 }
385 target_structs[target_struct_size++] = t;
386
387 if (targetlist == NULL)
388 add_prefix_cmd ("target", class_run, target_command, _("\
389 Connect to a target machine or process.\n\
390 The first argument is the type or protocol of the target machine.\n\
391 Remaining arguments are interpreted by the target protocol. For more\n\
392 information on the arguments for a particular protocol, type\n\
393 `help target ' followed by the protocol name."),
394 &targetlist, "target ", 0, &cmdlist);
395 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
396 &targetlist);
397 if (completer != NULL)
398 set_cmd_completer (c, completer);
399 }
400
401 /* Add a possible target architecture to the list. */
402
403 void
404 add_target (struct target_ops *t)
405 {
406 add_target_with_completer (t, NULL);
407 }
408
409 /* See target.h. */
410
411 void
412 add_deprecated_target_alias (struct target_ops *t, char *alias)
413 {
414 struct cmd_list_element *c;
415 char *alt;
416
417 /* If we used add_alias_cmd here, we would not get the deprecated
418 warning; see PR cli/15104. */
419 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
420 alt = xstrprintf ("target %s", t->to_shortname);
421 deprecate_cmd (c, alt);
422 }
423
424 /* Stub functions */
425
426 void
427 target_ignore (void)
428 {
429 }
430
431 void
432 target_kill (void)
433 {
434 struct target_ops *t;
435
436 for (t = current_target.beneath; t != NULL; t = t->beneath)
437 if (t->to_kill != NULL)
438 {
439 if (targetdebug)
440 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
441
442 t->to_kill (t);
443 return;
444 }
445
446 noprocess ();
447 }
448
449 void
450 target_load (char *arg, int from_tty)
451 {
452 target_dcache_invalidate ();
453 (*current_target.to_load) (arg, from_tty);
454 }
455
456 void
457 target_create_inferior (char *exec_file, char *args,
458 char **env, int from_tty)
459 {
460 struct target_ops *t;
461
462 for (t = current_target.beneath; t != NULL; t = t->beneath)
463 {
464 if (t->to_create_inferior != NULL)
465 {
466 t->to_create_inferior (t, exec_file, args, env, from_tty);
467 if (targetdebug)
468 fprintf_unfiltered (gdb_stdlog,
469 "target_create_inferior (%s, %s, xxx, %d)\n",
470 exec_file, args, from_tty);
471 return;
472 }
473 }
474
475 internal_error (__FILE__, __LINE__,
476 _("could not find a target to create inferior"));
477 }
478
479 void
480 target_terminal_inferior (void)
481 {
482 /* A background resume (``run&'') should leave GDB in control of the
483 terminal. Use target_can_async_p, not target_is_async_p, since at
484 this point the target is not async yet. However, if sync_execution
485 is not set, we know it will become async prior to resume. */
486 if (target_can_async_p () && !sync_execution)
487 return;
488
489 /* If GDB is resuming the inferior in the foreground, install
490 inferior's terminal modes. */
491 (*current_target.to_terminal_inferior) ();
492 }
493
494 static int
495 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
496 struct target_ops *t)
497 {
498 errno = EIO; /* Can't read/write this location. */
499 return 0; /* No bytes handled. */
500 }
501
502 static void
503 tcomplain (void)
504 {
505 error (_("You can't do that when your target is `%s'"),
506 current_target.to_shortname);
507 }
508
509 void
510 noprocess (void)
511 {
512 error (_("You can't do that without a process to debug."));
513 }
514
515 static void
516 default_terminal_info (const char *args, int from_tty)
517 {
518 printf_unfiltered (_("No saved terminal information.\n"));
519 }
520
521 /* A default implementation for the to_get_ada_task_ptid target method.
522
523 This function builds the PTID by using both LWP and TID as part of
524 the PTID lwp and tid elements. The pid used is the pid of the
525 inferior_ptid. */
526
527 static ptid_t
528 default_get_ada_task_ptid (long lwp, long tid)
529 {
530 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
531 }
532
533 static enum exec_direction_kind
534 default_execution_direction (void)
535 {
536 if (!target_can_execute_reverse)
537 return EXEC_FORWARD;
538 else if (!target_can_async_p ())
539 return EXEC_FORWARD;
540 else
541 gdb_assert_not_reached ("\
542 to_execution_direction must be implemented for reverse async");
543 }
544
545 /* Go through the target stack from top to bottom, copying over zero
546 entries in current_target, then filling in still empty entries. In
547 effect, we are doing class inheritance through the pushed target
548 vectors.
549
550 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
551 is currently implemented, is that it discards any knowledge of
552 which target an inherited method originally belonged to.
553 Consequently, new target methods should instead explicitly and
554 locally search the target stack for the target that can handle the
555 request. */
556
557 static void
558 update_current_target (void)
559 {
560 struct target_ops *t;
561
562 /* First, reset current's contents. */
563 memset (&current_target, 0, sizeof (current_target));
564
565 #define INHERIT(FIELD, TARGET) \
566 if (!current_target.FIELD) \
567 current_target.FIELD = (TARGET)->FIELD
568
569 for (t = target_stack; t; t = t->beneath)
570 {
571 INHERIT (to_shortname, t);
572 INHERIT (to_longname, t);
573 INHERIT (to_doc, t);
574 /* Do not inherit to_open. */
575 /* Do not inherit to_close. */
576 /* Do not inherit to_attach. */
577 INHERIT (to_post_attach, t);
578 INHERIT (to_attach_no_wait, t);
579 /* Do not inherit to_detach. */
580 /* Do not inherit to_disconnect. */
581 /* Do not inherit to_resume. */
582 /* Do not inherit to_wait. */
583 /* Do not inherit to_fetch_registers. */
584 /* Do not inherit to_store_registers. */
585 INHERIT (to_prepare_to_store, t);
586 INHERIT (deprecated_xfer_memory, t);
587 INHERIT (to_files_info, t);
588 /* Do not inherit to_insert_breakpoint. */
589 /* Do not inherit to_remove_breakpoint. */
590 INHERIT (to_can_use_hw_breakpoint, t);
591 INHERIT (to_insert_hw_breakpoint, t);
592 INHERIT (to_remove_hw_breakpoint, t);
593 /* Do not inherit to_ranged_break_num_registers. */
594 INHERIT (to_insert_watchpoint, t);
595 INHERIT (to_remove_watchpoint, t);
596 /* Do not inherit to_insert_mask_watchpoint. */
597 /* Do not inherit to_remove_mask_watchpoint. */
598 INHERIT (to_stopped_data_address, t);
599 INHERIT (to_have_steppable_watchpoint, t);
600 INHERIT (to_have_continuable_watchpoint, t);
601 INHERIT (to_stopped_by_watchpoint, t);
602 INHERIT (to_watchpoint_addr_within_range, t);
603 INHERIT (to_region_ok_for_hw_watchpoint, t);
604 INHERIT (to_can_accel_watchpoint_condition, t);
605 /* Do not inherit to_masked_watch_num_registers. */
606 INHERIT (to_terminal_init, t);
607 INHERIT (to_terminal_inferior, t);
608 INHERIT (to_terminal_ours_for_output, t);
609 INHERIT (to_terminal_ours, t);
610 INHERIT (to_terminal_save_ours, t);
611 INHERIT (to_terminal_info, t);
612 /* Do not inherit to_kill. */
613 INHERIT (to_load, t);
614 /* Do not inherit to_create_inferior. */
615 INHERIT (to_post_startup_inferior, t);
616 INHERIT (to_insert_fork_catchpoint, t);
617 INHERIT (to_remove_fork_catchpoint, t);
618 INHERIT (to_insert_vfork_catchpoint, t);
619 INHERIT (to_remove_vfork_catchpoint, t);
620 /* Do not inherit to_follow_fork. */
621 INHERIT (to_insert_exec_catchpoint, t);
622 INHERIT (to_remove_exec_catchpoint, t);
623 INHERIT (to_set_syscall_catchpoint, t);
624 INHERIT (to_has_exited, t);
625 /* Do not inherit to_mourn_inferior. */
626 INHERIT (to_can_run, t);
627 /* Do not inherit to_pass_signals. */
628 /* Do not inherit to_program_signals. */
629 /* Do not inherit to_thread_alive. */
630 /* Do not inherit to_find_new_threads. */
631 /* Do not inherit to_pid_to_str. */
632 INHERIT (to_extra_thread_info, t);
633 INHERIT (to_thread_name, t);
634 INHERIT (to_stop, t);
635 /* Do not inherit to_xfer_partial. */
636 INHERIT (to_rcmd, t);
637 INHERIT (to_pid_to_exec_file, t);
638 INHERIT (to_log_command, t);
639 INHERIT (to_stratum, t);
640 /* Do not inherit to_has_all_memory. */
641 /* Do not inherit to_has_memory. */
642 /* Do not inherit to_has_stack. */
643 /* Do not inherit to_has_registers. */
644 /* Do not inherit to_has_execution. */
645 INHERIT (to_has_thread_control, t);
646 INHERIT (to_can_async_p, t);
647 INHERIT (to_is_async_p, t);
648 INHERIT (to_async, t);
649 INHERIT (to_find_memory_regions, t);
650 INHERIT (to_make_corefile_notes, t);
651 INHERIT (to_get_bookmark, t);
652 INHERIT (to_goto_bookmark, t);
653 /* Do not inherit to_get_thread_local_address. */
654 INHERIT (to_can_execute_reverse, t);
655 INHERIT (to_execution_direction, t);
656 INHERIT (to_thread_architecture, t);
657 /* Do not inherit to_read_description. */
658 INHERIT (to_get_ada_task_ptid, t);
659 /* Do not inherit to_search_memory. */
660 INHERIT (to_supports_multi_process, t);
661 INHERIT (to_supports_enable_disable_tracepoint, t);
662 INHERIT (to_supports_string_tracing, t);
663 INHERIT (to_trace_init, t);
664 INHERIT (to_download_tracepoint, t);
665 INHERIT (to_can_download_tracepoint, t);
666 INHERIT (to_download_trace_state_variable, t);
667 INHERIT (to_enable_tracepoint, t);
668 INHERIT (to_disable_tracepoint, t);
669 INHERIT (to_trace_set_readonly_regions, t);
670 INHERIT (to_trace_start, t);
671 INHERIT (to_get_trace_status, t);
672 INHERIT (to_get_tracepoint_status, t);
673 INHERIT (to_trace_stop, t);
674 INHERIT (to_trace_find, t);
675 INHERIT (to_get_trace_state_variable_value, t);
676 INHERIT (to_save_trace_data, t);
677 INHERIT (to_upload_tracepoints, t);
678 INHERIT (to_upload_trace_state_variables, t);
679 INHERIT (to_get_raw_trace_data, t);
680 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
681 INHERIT (to_set_disconnected_tracing, t);
682 INHERIT (to_set_circular_trace_buffer, t);
683 INHERIT (to_set_trace_buffer_size, t);
684 INHERIT (to_set_trace_notes, t);
685 INHERIT (to_get_tib_address, t);
686 INHERIT (to_set_permissions, t);
687 INHERIT (to_static_tracepoint_marker_at, t);
688 INHERIT (to_static_tracepoint_markers_by_strid, t);
689 INHERIT (to_traceframe_info, t);
690 INHERIT (to_use_agent, t);
691 INHERIT (to_can_use_agent, t);
692 INHERIT (to_augmented_libraries_svr4_read, t);
693 INHERIT (to_magic, t);
694 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
695 INHERIT (to_can_run_breakpoint_commands, t);
696 /* Do not inherit to_memory_map. */
697 /* Do not inherit to_flash_erase. */
698 /* Do not inherit to_flash_done. */
699 }
700 #undef INHERIT
701
702 /* Clean up a target struct so it no longer has any zero pointers in
703 it. Some entries are defaulted to a method that prints an error,
704 others are hard-wired to a standard recursive default. */
705
706 #define de_fault(field, value) \
707 if (!current_target.field) \
708 current_target.field = value
709
710 de_fault (to_open,
711 (void (*) (char *, int))
712 tcomplain);
713 de_fault (to_close,
714 (void (*) (void))
715 target_ignore);
716 de_fault (to_post_attach,
717 (void (*) (int))
718 target_ignore);
719 de_fault (to_prepare_to_store,
720 (void (*) (struct target_ops *, struct regcache *))
721 noprocess);
722 de_fault (deprecated_xfer_memory,
723 (int (*) (CORE_ADDR, gdb_byte *, int, int,
724 struct mem_attrib *, struct target_ops *))
725 nomemory);
726 de_fault (to_files_info,
727 (void (*) (struct target_ops *))
728 target_ignore);
729 de_fault (to_can_use_hw_breakpoint,
730 (int (*) (int, int, int))
731 return_zero);
732 de_fault (to_insert_hw_breakpoint,
733 (int (*) (struct gdbarch *, struct bp_target_info *))
734 return_minus_one);
735 de_fault (to_remove_hw_breakpoint,
736 (int (*) (struct gdbarch *, struct bp_target_info *))
737 return_minus_one);
738 de_fault (to_insert_watchpoint,
739 (int (*) (CORE_ADDR, int, int, struct expression *))
740 return_minus_one);
741 de_fault (to_remove_watchpoint,
742 (int (*) (CORE_ADDR, int, int, struct expression *))
743 return_minus_one);
744 de_fault (to_stopped_by_watchpoint,
745 (int (*) (void))
746 return_zero);
747 de_fault (to_stopped_data_address,
748 (int (*) (struct target_ops *, CORE_ADDR *))
749 return_zero);
750 de_fault (to_watchpoint_addr_within_range,
751 default_watchpoint_addr_within_range);
752 de_fault (to_region_ok_for_hw_watchpoint,
753 default_region_ok_for_hw_watchpoint);
754 de_fault (to_can_accel_watchpoint_condition,
755 (int (*) (CORE_ADDR, int, int, struct expression *))
756 return_zero);
757 de_fault (to_terminal_init,
758 (void (*) (void))
759 target_ignore);
760 de_fault (to_terminal_inferior,
761 (void (*) (void))
762 target_ignore);
763 de_fault (to_terminal_ours_for_output,
764 (void (*) (void))
765 target_ignore);
766 de_fault (to_terminal_ours,
767 (void (*) (void))
768 target_ignore);
769 de_fault (to_terminal_save_ours,
770 (void (*) (void))
771 target_ignore);
772 de_fault (to_terminal_info,
773 default_terminal_info);
774 de_fault (to_load,
775 (void (*) (char *, int))
776 tcomplain);
777 de_fault (to_post_startup_inferior,
778 (void (*) (ptid_t))
779 target_ignore);
780 de_fault (to_insert_fork_catchpoint,
781 (int (*) (int))
782 return_one);
783 de_fault (to_remove_fork_catchpoint,
784 (int (*) (int))
785 return_one);
786 de_fault (to_insert_vfork_catchpoint,
787 (int (*) (int))
788 return_one);
789 de_fault (to_remove_vfork_catchpoint,
790 (int (*) (int))
791 return_one);
792 de_fault (to_insert_exec_catchpoint,
793 (int (*) (int))
794 return_one);
795 de_fault (to_remove_exec_catchpoint,
796 (int (*) (int))
797 return_one);
798 de_fault (to_set_syscall_catchpoint,
799 (int (*) (int, int, int, int, int *))
800 return_one);
801 de_fault (to_has_exited,
802 (int (*) (int, int, int *))
803 return_zero);
804 de_fault (to_can_run,
805 return_zero);
806 de_fault (to_extra_thread_info,
807 (char *(*) (struct thread_info *))
808 return_null);
809 de_fault (to_thread_name,
810 (char *(*) (struct thread_info *))
811 return_null);
812 de_fault (to_stop,
813 (void (*) (ptid_t))
814 target_ignore);
815 current_target.to_xfer_partial = current_xfer_partial;
816 de_fault (to_rcmd,
817 (void (*) (char *, struct ui_file *))
818 tcomplain);
819 de_fault (to_pid_to_exec_file,
820 (char *(*) (int))
821 return_null);
822 de_fault (to_async,
823 (void (*) (void (*) (enum inferior_event_type, void*), void*))
824 tcomplain);
825 de_fault (to_thread_architecture,
826 default_thread_architecture);
827 current_target.to_read_description = NULL;
828 de_fault (to_get_ada_task_ptid,
829 (ptid_t (*) (long, long))
830 default_get_ada_task_ptid);
831 de_fault (to_supports_multi_process,
832 (int (*) (void))
833 return_zero);
834 de_fault (to_supports_enable_disable_tracepoint,
835 (int (*) (void))
836 return_zero);
837 de_fault (to_supports_string_tracing,
838 (int (*) (void))
839 return_zero);
840 de_fault (to_trace_init,
841 (void (*) (void))
842 tcomplain);
843 de_fault (to_download_tracepoint,
844 (void (*) (struct bp_location *))
845 tcomplain);
846 de_fault (to_can_download_tracepoint,
847 (int (*) (void))
848 return_zero);
849 de_fault (to_download_trace_state_variable,
850 (void (*) (struct trace_state_variable *))
851 tcomplain);
852 de_fault (to_enable_tracepoint,
853 (void (*) (struct bp_location *))
854 tcomplain);
855 de_fault (to_disable_tracepoint,
856 (void (*) (struct bp_location *))
857 tcomplain);
858 de_fault (to_trace_set_readonly_regions,
859 (void (*) (void))
860 tcomplain);
861 de_fault (to_trace_start,
862 (void (*) (void))
863 tcomplain);
864 de_fault (to_get_trace_status,
865 (int (*) (struct trace_status *))
866 return_minus_one);
867 de_fault (to_get_tracepoint_status,
868 (void (*) (struct breakpoint *, struct uploaded_tp *))
869 tcomplain);
870 de_fault (to_trace_stop,
871 (void (*) (void))
872 tcomplain);
873 de_fault (to_trace_find,
874 (int (*) (enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
875 return_minus_one);
876 de_fault (to_get_trace_state_variable_value,
877 (int (*) (int, LONGEST *))
878 return_zero);
879 de_fault (to_save_trace_data,
880 (int (*) (const char *))
881 tcomplain);
882 de_fault (to_upload_tracepoints,
883 (int (*) (struct uploaded_tp **))
884 return_zero);
885 de_fault (to_upload_trace_state_variables,
886 (int (*) (struct uploaded_tsv **))
887 return_zero);
888 de_fault (to_get_raw_trace_data,
889 (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
890 tcomplain);
891 de_fault (to_get_min_fast_tracepoint_insn_len,
892 (int (*) (void))
893 return_minus_one);
894 de_fault (to_set_disconnected_tracing,
895 (void (*) (int))
896 target_ignore);
897 de_fault (to_set_circular_trace_buffer,
898 (void (*) (int))
899 target_ignore);
900 de_fault (to_set_trace_buffer_size,
901 (void (*) (LONGEST))
902 target_ignore);
903 de_fault (to_set_trace_notes,
904 (int (*) (const char *, const char *, const char *))
905 return_zero);
906 de_fault (to_get_tib_address,
907 (int (*) (ptid_t, CORE_ADDR *))
908 tcomplain);
909 de_fault (to_set_permissions,
910 (void (*) (void))
911 target_ignore);
912 de_fault (to_static_tracepoint_marker_at,
913 (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
914 return_zero);
915 de_fault (to_static_tracepoint_markers_by_strid,
916 (VEC(static_tracepoint_marker_p) * (*) (const char *))
917 tcomplain);
918 de_fault (to_traceframe_info,
919 (struct traceframe_info * (*) (void))
920 return_null);
921 de_fault (to_supports_evaluation_of_breakpoint_conditions,
922 (int (*) (void))
923 return_zero);
924 de_fault (to_can_run_breakpoint_commands,
925 (int (*) (void))
926 return_zero);
927 de_fault (to_use_agent,
928 (int (*) (int))
929 tcomplain);
930 de_fault (to_can_use_agent,
931 (int (*) (void))
932 return_zero);
933 de_fault (to_augmented_libraries_svr4_read,
934 (int (*) (void))
935 return_zero);
936 de_fault (to_execution_direction, default_execution_direction);
937
938 #undef de_fault
939
940 /* Finally, position the target-stack beneath the squashed
941 "current_target". That way code looking for a non-inherited
942 target method can quickly and simply find it. */
943 current_target.beneath = target_stack;
944
945 if (targetdebug)
946 setup_target_debug ();
947 }
948
949 /* Push a new target type into the stack of the existing target accessors,
950 possibly superseding some of the existing accessors.
951
952 Rather than allow an empty stack, we always have the dummy target at
953 the bottom stratum, so we can call the function vectors without
954 checking them. */
955
956 void
957 push_target (struct target_ops *t)
958 {
959 struct target_ops **cur;
960
961 /* Check magic number. If wrong, it probably means someone changed
962 the struct definition, but not all the places that initialize one. */
963 if (t->to_magic != OPS_MAGIC)
964 {
965 fprintf_unfiltered (gdb_stderr,
966 "Magic number of %s target struct wrong\n",
967 t->to_shortname);
968 internal_error (__FILE__, __LINE__,
969 _("failed internal consistency check"));
970 }
971
972 /* Find the proper stratum to install this target in. */
973 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
974 {
975 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
976 break;
977 }
978
979 /* If there are already targets at this stratum, remove them. */
980 /* FIXME: cagney/2003-10-15: I think this should be popping all
981 targets to CUR, and not just those at this stratum level. */
982 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
983 {
984 /* There's already something at this stratum level. Close it,
985 and un-hook it from the stack. */
986 struct target_ops *tmp = (*cur);
987
988 (*cur) = (*cur)->beneath;
989 tmp->beneath = NULL;
990 target_close (tmp);
991 }
992
993 /* We have removed all targets in our stratum, now add the new one. */
994 t->beneath = (*cur);
995 (*cur) = t;
996
997 update_current_target ();
998 }
999
1000 /* Remove a target_ops vector from the stack, wherever it may be.
1001 Return how many times it was removed (0 or 1). */
1002
1003 int
1004 unpush_target (struct target_ops *t)
1005 {
1006 struct target_ops **cur;
1007 struct target_ops *tmp;
1008
1009 if (t->to_stratum == dummy_stratum)
1010 internal_error (__FILE__, __LINE__,
1011 _("Attempt to unpush the dummy target"));
1012
1013 /* Look for the specified target. Note that we assume that a target
1014 can only occur once in the target stack. */
1015
1016 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1017 {
1018 if ((*cur) == t)
1019 break;
1020 }
1021
1022 /* If we don't find target_ops, quit. Only open targets should be
1023 closed. */
1024 if ((*cur) == NULL)
1025 return 0;
1026
1027 /* Unchain the target. */
1028 tmp = (*cur);
1029 (*cur) = (*cur)->beneath;
1030 tmp->beneath = NULL;
1031
1032 update_current_target ();
1033
1034 /* Finally close the target. Note we do this after unchaining, so
1035 any target method calls from within the target_close
1036 implementation don't end up in T anymore. */
1037 target_close (t);
1038
1039 return 1;
1040 }
1041
1042 void
1043 pop_all_targets_above (enum strata above_stratum)
1044 {
1045 while ((int) (current_target.to_stratum) > (int) above_stratum)
1046 {
1047 if (!unpush_target (target_stack))
1048 {
1049 fprintf_unfiltered (gdb_stderr,
1050 "pop_all_targets couldn't find target %s\n",
1051 target_stack->to_shortname);
1052 internal_error (__FILE__, __LINE__,
1053 _("failed internal consistency check"));
1054 break;
1055 }
1056 }
1057 }
1058
1059 void
1060 pop_all_targets (void)
1061 {
1062 pop_all_targets_above (dummy_stratum);
1063 }
1064
1065 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1066
1067 int
1068 target_is_pushed (struct target_ops *t)
1069 {
1070 struct target_ops **cur;
1071
1072 /* Check magic number. If wrong, it probably means someone changed
1073 the struct definition, but not all the places that initialize one. */
1074 if (t->to_magic != OPS_MAGIC)
1075 {
1076 fprintf_unfiltered (gdb_stderr,
1077 "Magic number of %s target struct wrong\n",
1078 t->to_shortname);
1079 internal_error (__FILE__, __LINE__,
1080 _("failed internal consistency check"));
1081 }
1082
1083 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1084 if (*cur == t)
1085 return 1;
1086
1087 return 0;
1088 }
1089
1090 /* Using the objfile specified in OBJFILE, find the address for the
1091 current thread's thread-local storage with offset OFFSET. */
1092 CORE_ADDR
1093 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1094 {
1095 volatile CORE_ADDR addr = 0;
1096 struct target_ops *target;
1097
1098 for (target = current_target.beneath;
1099 target != NULL;
1100 target = target->beneath)
1101 {
1102 if (target->to_get_thread_local_address != NULL)
1103 break;
1104 }
1105
1106 if (target != NULL
1107 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1108 {
1109 ptid_t ptid = inferior_ptid;
1110 volatile struct gdb_exception ex;
1111
1112 TRY_CATCH (ex, RETURN_MASK_ALL)
1113 {
1114 CORE_ADDR lm_addr;
1115
1116 /* Fetch the load module address for this objfile. */
1117 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1118 objfile);
1119 /* If it's 0, throw the appropriate exception. */
1120 if (lm_addr == 0)
1121 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1122 _("TLS load module not found"));
1123
1124 addr = target->to_get_thread_local_address (target, ptid,
1125 lm_addr, offset);
1126 }
1127 /* If an error occurred, print TLS related messages here. Otherwise,
1128 throw the error to some higher catcher. */
1129 if (ex.reason < 0)
1130 {
1131 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1132
1133 switch (ex.error)
1134 {
1135 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1136 error (_("Cannot find thread-local variables "
1137 "in this thread library."));
1138 break;
1139 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1140 if (objfile_is_library)
1141 error (_("Cannot find shared library `%s' in dynamic"
1142 " linker's load module list"), objfile_name (objfile));
1143 else
1144 error (_("Cannot find executable file `%s' in dynamic"
1145 " linker's load module list"), objfile_name (objfile));
1146 break;
1147 case TLS_NOT_ALLOCATED_YET_ERROR:
1148 if (objfile_is_library)
1149 error (_("The inferior has not yet allocated storage for"
1150 " thread-local variables in\n"
1151 "the shared library `%s'\n"
1152 "for %s"),
1153 objfile_name (objfile), target_pid_to_str (ptid));
1154 else
1155 error (_("The inferior has not yet allocated storage for"
1156 " thread-local variables in\n"
1157 "the executable `%s'\n"
1158 "for %s"),
1159 objfile_name (objfile), target_pid_to_str (ptid));
1160 break;
1161 case TLS_GENERIC_ERROR:
1162 if (objfile_is_library)
1163 error (_("Cannot find thread-local storage for %s, "
1164 "shared library %s:\n%s"),
1165 target_pid_to_str (ptid),
1166 objfile_name (objfile), ex.message);
1167 else
1168 error (_("Cannot find thread-local storage for %s, "
1169 "executable file %s:\n%s"),
1170 target_pid_to_str (ptid),
1171 objfile_name (objfile), ex.message);
1172 break;
1173 default:
1174 throw_exception (ex);
1175 break;
1176 }
1177 }
1178 }
1179 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1180 TLS is an ABI-specific thing. But we don't do that yet. */
1181 else
1182 error (_("Cannot find thread-local variables on this target"));
1183
1184 return addr;
1185 }
1186
1187 const char *
1188 target_xfer_error_to_string (enum target_xfer_error err)
1189 {
1190 #define CASE(X) case X: return #X
1191 switch (err)
1192 {
1193 CASE(TARGET_XFER_E_IO);
1194 CASE(TARGET_XFER_E_UNAVAILABLE);
1195 default:
1196 return "<unknown>";
1197 }
1198 #undef CASE
1199 };
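
/* Illustrative sketch, not part of target.c: translating a negative
   to_xfer_partial result into a human-readable message.  The function
   and variable names are hypothetical.  */
#if 0
static void
example_report_xfer_error (struct target_ops *ops, gdb_byte *readbuf,
			   ULONGEST offset, ULONGEST len)
{
  LONGEST n = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				   readbuf, NULL, offset, len);

  if (n < 0)
    warning (_("transfer failed: %s"),
	     target_xfer_error_to_string ((enum target_xfer_error) n));
}
#endif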
1200
1201
1202 #undef MIN
1203 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1204
1205 /* target_read_string -- read a null-terminated string, up to LEN bytes,
1206 from MEMADDR in the target. Set *ERRNOP to the errno code, or 0 if successful.
1207 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1208 is responsible for freeing it. Return the number of bytes successfully
1209 read. */
1210
1211 int
1212 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1213 {
1214 int tlen, offset, i;
1215 gdb_byte buf[4];
1216 int errcode = 0;
1217 char *buffer;
1218 int buffer_allocated;
1219 char *bufptr;
1220 unsigned int nbytes_read = 0;
1221
1222 gdb_assert (string);
1223
1224 /* Small for testing. */
1225 buffer_allocated = 4;
1226 buffer = xmalloc (buffer_allocated);
1227 bufptr = buffer;
1228
1229 while (len > 0)
1230 {
1231 tlen = MIN (len, 4 - (memaddr & 3));
1232 offset = memaddr & 3;
1233
1234 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1235 if (errcode != 0)
1236 {
1237 /* The transfer request might have crossed the boundary to an
1238 unallocated region of memory. Retry the transfer, requesting
1239 a single byte. */
1240 tlen = 1;
1241 offset = 0;
1242 errcode = target_read_memory (memaddr, buf, 1);
1243 if (errcode != 0)
1244 goto done;
1245 }
1246
1247 if (bufptr - buffer + tlen > buffer_allocated)
1248 {
1249 unsigned int bytes;
1250
1251 bytes = bufptr - buffer;
1252 buffer_allocated *= 2;
1253 buffer = xrealloc (buffer, buffer_allocated);
1254 bufptr = buffer + bytes;
1255 }
1256
1257 for (i = 0; i < tlen; i++)
1258 {
1259 *bufptr++ = buf[i + offset];
1260 if (buf[i + offset] == '\000')
1261 {
1262 nbytes_read += i + 1;
1263 goto done;
1264 }
1265 }
1266
1267 memaddr += tlen;
1268 len -= tlen;
1269 nbytes_read += tlen;
1270 }
1271 done:
1272 *string = buffer;
1273 if (errnop != NULL)
1274 *errnop = errcode;
1275 return nbytes_read;
1276 }
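
/* Illustrative usage sketch, not part of target.c: reading a
   NUL-terminated string and releasing the malloc'd buffer.  The
   function name, address and length are made up.  */
#if 0
static void
example_read_string (void)
{
  CORE_ADDR memaddr = 0x1000;	/* Hypothetical address.  */
  char *str;
  int errcode, nread;

  nread = target_read_string (memaddr, &str, 200, &errcode);
  if (errcode != 0)
    printf_filtered ("read stopped after %d bytes\n", nread);
  else
    printf_filtered ("read \"%s\"\n", str);

  /* *STRING is set even on error, so always free it.  */
  xfree (str);
}
#endif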
1277
1278 struct target_section_table *
1279 target_get_section_table (struct target_ops *target)
1280 {
1281 struct target_ops *t;
1282
1283 if (targetdebug)
1284 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1285
1286 for (t = target; t != NULL; t = t->beneath)
1287 if (t->to_get_section_table != NULL)
1288 return (*t->to_get_section_table) (t);
1289
1290 return NULL;
1291 }
1292
1293 /* Find a section containing ADDR. */
1294
1295 struct target_section *
1296 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1297 {
1298 struct target_section_table *table = target_get_section_table (target);
1299 struct target_section *secp;
1300
1301 if (table == NULL)
1302 return NULL;
1303
1304 for (secp = table->sections; secp < table->sections_end; secp++)
1305 {
1306 if (addr >= secp->addr && addr < secp->endaddr)
1307 return secp;
1308 }
1309 return NULL;
1310 }
1311
1312 /* Read memory from the live target, even if currently inspecting a
1313 traceframe. The return value is the same as that of target_read. */
1314
1315 static LONGEST
1316 target_read_live_memory (enum target_object object,
1317 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len)
1318 {
1319 LONGEST ret;
1320 struct cleanup *cleanup;
1321
1322 /* Switch momentarily out of tfind mode so as to access live memory.
1323 Note that this must not clear global state, such as the frame
1324 cache, which must still remain valid for the previous traceframe.
1325 We may be _building_ the frame cache at this point. */
1326 cleanup = make_cleanup_restore_traceframe_number ();
1327 set_traceframe_number (-1);
1328
1329 ret = target_read (current_target.beneath, object, NULL,
1330 myaddr, memaddr, len);
1331
1332 do_cleanups (cleanup);
1333 return ret;
1334 }
1335
1336 /* Using the set of read-only target sections of OPS, read live
1337 read-only memory. Note that the actual reads start from the
1338 top-most target again.
1339
1340 For interface/parameters/return description see target.h,
1341 to_xfer_partial. */
1342
1343 static LONGEST
1344 memory_xfer_live_readonly_partial (struct target_ops *ops,
1345 enum target_object object,
1346 gdb_byte *readbuf, ULONGEST memaddr,
1347 ULONGEST len)
1348 {
1349 struct target_section *secp;
1350 struct target_section_table *table;
1351
1352 secp = target_section_by_addr (ops, memaddr);
1353 if (secp != NULL
1354 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1355 secp->the_bfd_section)
1356 & SEC_READONLY))
1357 {
1358 struct target_section *p;
1359 ULONGEST memend = memaddr + len;
1360
1361 table = target_get_section_table (ops);
1362
1363 for (p = table->sections; p < table->sections_end; p++)
1364 {
1365 if (memaddr >= p->addr)
1366 {
1367 if (memend <= p->endaddr)
1368 {
1369 /* Entire transfer is within this section. */
1370 return target_read_live_memory (object, memaddr,
1371 readbuf, len);
1372 }
1373 else if (memaddr >= p->endaddr)
1374 {
1375 /* This section ends before the transfer starts. */
1376 continue;
1377 }
1378 else
1379 {
1380 /* This section overlaps the transfer. Just do half. */
1381 len = p->endaddr - memaddr;
1382 return target_read_live_memory (object, memaddr,
1383 readbuf, len);
1384 }
1385 }
1386 }
1387 }
1388
1389 return 0;
1390 }
1391
1392 /* Read memory from more than one valid target. A core file, for
1393 instance, could have some of the memory but delegate other bits to
1394 the target below it. So, we must manually try all targets. */
1395
1396 static LONGEST
1397 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1398 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len)
1399 {
1400 LONGEST res;
1401
1402 do
1403 {
1404 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1405 readbuf, writebuf, memaddr, len);
1406 if (res > 0)
1407 break;
1408
1409 /* Stop if the target reports that the memory is not available. */
1410 if (res == TARGET_XFER_E_UNAVAILABLE)
1411 break;
1412
1413 /* We want to continue past core files to executables, but not
1414 past a running target's memory. */
1415 if (ops->to_has_all_memory (ops))
1416 break;
1417
1418 ops = ops->beneath;
1419 }
1420 while (ops != NULL);
1421
1422 return res;
1423 }
1424
1425 /* Perform a partial memory transfer.
1426 For docs see target.h, to_xfer_partial. */
1427
1428 static LONGEST
1429 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1430 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1431 ULONGEST len)
1432 {
1433 LONGEST res;
1434 int reg_len;
1435 struct mem_region *region;
1436 struct inferior *inf;
1437
1438 /* For accesses to unmapped overlay sections, read directly from
1439 files. Must do this first, as MEMADDR may need adjustment. */
1440 if (readbuf != NULL && overlay_debugging)
1441 {
1442 struct obj_section *section = find_pc_overlay (memaddr);
1443
1444 if (pc_in_unmapped_range (memaddr, section))
1445 {
1446 struct target_section_table *table
1447 = target_get_section_table (ops);
1448 const char *section_name = section->the_bfd_section->name;
1449
1450 memaddr = overlay_mapped_address (memaddr, section);
1451 return section_table_xfer_memory_partial (readbuf, writebuf,
1452 memaddr, len,
1453 table->sections,
1454 table->sections_end,
1455 section_name);
1456 }
1457 }
1458
1459 /* Try the executable files, if "trust-readonly-sections" is set. */
1460 if (readbuf != NULL && trust_readonly)
1461 {
1462 struct target_section *secp;
1463 struct target_section_table *table;
1464
1465 secp = target_section_by_addr (ops, memaddr);
1466 if (secp != NULL
1467 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1468 secp->the_bfd_section)
1469 & SEC_READONLY))
1470 {
1471 table = target_get_section_table (ops);
1472 return section_table_xfer_memory_partial (readbuf, writebuf,
1473 memaddr, len,
1474 table->sections,
1475 table->sections_end,
1476 NULL);
1477 }
1478 }
1479
1480 /* If reading unavailable memory in the context of traceframes, and
1481 this address falls within a read-only section, fall back to
1482 reading from live memory. */
1483 if (readbuf != NULL && get_traceframe_number () != -1)
1484 {
1485 VEC(mem_range_s) *available;
1486
1487 /* If we fail to get the set of available memory, then the
1488 target does not support querying traceframe info, and so we
1489 attempt reading from the traceframe anyway (assuming the
1490 target implements the old QTro packet then). */
1491 if (traceframe_available_memory (&available, memaddr, len))
1492 {
1493 struct cleanup *old_chain;
1494
1495 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1496
1497 if (VEC_empty (mem_range_s, available)
1498 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1499 {
1500 /* Don't read into the traceframe's available
1501 memory. */
1502 if (!VEC_empty (mem_range_s, available))
1503 {
1504 LONGEST oldlen = len;
1505
1506 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1507 gdb_assert (len <= oldlen);
1508 }
1509
1510 do_cleanups (old_chain);
1511
1512 /* This goes through the topmost target again. */
1513 res = memory_xfer_live_readonly_partial (ops, object,
1514 readbuf, memaddr, len);
1515 if (res > 0)
1516 return res;
1517
1518 /* No use trying further, we know some memory starting
1519 at MEMADDR isn't available. */
1520 return TARGET_XFER_E_UNAVAILABLE;
1521 }
1522
1523 /* Don't try to read more than how much is available, in
1524 case the target implements the deprecated QTro packet to
1525 cater for older GDBs (the target's knowledge of read-only
1526 sections may be outdated by now). */
1527 len = VEC_index (mem_range_s, available, 0)->length;
1528
1529 do_cleanups (old_chain);
1530 }
1531 }
1532
1533 /* Try GDB's internal data cache. */
1534 region = lookup_mem_region (memaddr);
1535 /* region->hi == 0 means there's no upper bound. */
1536 if (memaddr + len < region->hi || region->hi == 0)
1537 reg_len = len;
1538 else
1539 reg_len = region->hi - memaddr;
1540
1541 switch (region->attrib.mode)
1542 {
1543 case MEM_RO:
1544 if (writebuf != NULL)
1545 return TARGET_XFER_E_IO;
1546 break;
1547
1548 case MEM_WO:
1549 if (readbuf != NULL)
1550 return TARGET_XFER_E_IO;
1551 break;
1552
1553 case MEM_FLASH:
1554 /* We only support writing to flash during "load" for now. */
1555 if (writebuf != NULL)
1556 error (_("Writing to flash memory forbidden in this context"));
1557 break;
1558
1559 case MEM_NONE:
1560 return TARGET_XFER_E_IO;
1561 }
1562
1563 if (!ptid_equal (inferior_ptid, null_ptid))
1564 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1565 else
1566 inf = NULL;
1567
1568 if (inf != NULL
1569 /* The dcache reads whole cache lines; that doesn't play well
1570 with reading from a trace buffer, because reading outside of
1571 the collected memory range fails. */
1572 && get_traceframe_number () == -1
1573 && (region->attrib.cache
1574 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1575 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1576 {
1577 DCACHE *dcache = target_dcache_get_or_init ();
1578
1579 if (readbuf != NULL)
1580 res = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
1581 else
1582 /* FIXME drow/2006-08-09: If we're going to preserve const
1583 correctness dcache_xfer_memory should take readbuf and
1584 writebuf. */
1585 res = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
1586 reg_len, 1);
1587 if (res <= 0)
1588 return -1;
1589 else
1590 return res;
1591 }
1592
1593 /* If none of those methods found the memory we wanted, fall back
1594 to a target partial transfer. Normally a single call to
1595 to_xfer_partial is enough; if it doesn't recognize an object
1596 it will call the to_xfer_partial of the next target down.
1597 But for memory this won't do. Memory is the only target
1598 object which can be read from more than one valid target. */
1599 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len);
1600
1601 /* Make sure the cache gets updated no matter what - if we are writing
1602 to the stack. Even if this write is not tagged as such, we still need
1603 to update the cache. */
1604
1605 if (res > 0
1606 && inf != NULL
1607 && writebuf != NULL
1608 && target_dcache_init_p ()
1609 && !region->attrib.cache
1610 && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
1611 || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
1612 {
1613 DCACHE *dcache = target_dcache_get ();
1614
1615 dcache_update (dcache, memaddr, (void *) writebuf, res);
1616 }
1617
1618 /* If we still haven't got anything, return the last error. We
1619 give up. */
1620 return res;
1621 }
1622
1623 /* Perform a partial memory transfer. For docs see target.h,
1624 to_xfer_partial. */
1625
1626 static LONGEST
1627 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1628 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1629 ULONGEST len)
1630 {
1631 int res;
1632
1633 /* Zero length requests are ok and require no work. */
1634 if (len == 0)
1635 return 0;
1636
1637 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1638 breakpoint insns, thus hiding out from higher layers whether
1639 there are software breakpoints inserted in the code stream. */
1640 if (readbuf != NULL)
1641 {
1642 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len);
1643
1644 if (res > 0 && !show_memory_breakpoints)
1645 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1646 }
1647 else
1648 {
1649 void *buf;
1650 struct cleanup *old_chain;
1651
1652 /* A large write request is likely to be partially satisfied
1653 by memory_xfer_partial_1. We will continually malloc
1654 and free a copy of the entire write request for breakpoint
1655 shadow handling even though we only end up writing a small
1656 subset of it. Cap writes to 4KB to mitigate this. */
1657 len = min (4096, len);
1658
1659 buf = xmalloc (len);
1660 old_chain = make_cleanup (xfree, buf);
1661 memcpy (buf, writebuf, len);
1662
1663 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1664 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len);
1665
1666 do_cleanups (old_chain);
1667 }
1668
1669 return res;
1670 }
1671
1672 static void
1673 restore_show_memory_breakpoints (void *arg)
1674 {
1675 show_memory_breakpoints = (uintptr_t) arg;
1676 }
1677
1678 struct cleanup *
1679 make_show_memory_breakpoints_cleanup (int show)
1680 {
1681 int current = show_memory_breakpoints;
1682
1683 show_memory_breakpoints = show;
1684 return make_cleanup (restore_show_memory_breakpoints,
1685 (void *) (uintptr_t) current);
1686 }
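
/* Illustrative sketch, not part of target.c: temporarily reading raw
   memory contents (with any inserted breakpoint instructions left
   visible) and restoring the previous setting via the cleanup.  The
   function name and arguments are hypothetical.  */
#if 0
static void
example_read_raw_contents (CORE_ADDR addr, gdb_byte *buf, ssize_t len)
{
  struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);

  /* While the cleanup is pending, reads are not filtered through
     breakpoint shadow contents.  */
  target_read_memory (addr, buf, len);

  do_cleanups (old_chain);	/* Restores show_memory_breakpoints.  */
}
#endif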
1687
1688 /* For docs see target.h, to_xfer_partial. */
1689
1690 LONGEST
1691 target_xfer_partial (struct target_ops *ops,
1692 enum target_object object, const char *annex,
1693 gdb_byte *readbuf, const gdb_byte *writebuf,
1694 ULONGEST offset, ULONGEST len)
1695 {
1696 LONGEST retval;
1697
1698 gdb_assert (ops->to_xfer_partial != NULL);
1699
1700 /* Transfer is done when LEN is zero. */
1701 if (len == 0)
1702 return 0;
1703
1704 if (writebuf && !may_write_memory)
1705 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1706 core_addr_to_string_nz (offset), plongest (len));
1707
1708 /* If this is a memory transfer, let the memory-specific code
1709 have a look at it instead. Memory transfers are more
1710 complicated. */
1711 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1712 || object == TARGET_OBJECT_CODE_MEMORY)
1713 retval = memory_xfer_partial (ops, object, readbuf,
1714 writebuf, offset, len);
1715 else if (object == TARGET_OBJECT_RAW_MEMORY)
1716 {
1717 /* Request the normal memory object from other layers. */
1718 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len);
1719 }
1720 else
1721 retval = ops->to_xfer_partial (ops, object, annex, readbuf,
1722 writebuf, offset, len);
1723
1724 if (targetdebug)
1725 {
1726 const unsigned char *myaddr = NULL;
1727
1728 fprintf_unfiltered (gdb_stdlog,
1729 "%s:target_xfer_partial "
1730 "(%d, %s, %s, %s, %s, %s) = %s",
1731 ops->to_shortname,
1732 (int) object,
1733 (annex ? annex : "(null)"),
1734 host_address_to_string (readbuf),
1735 host_address_to_string (writebuf),
1736 core_addr_to_string_nz (offset),
1737 pulongest (len), plongest (retval));
1738
1739 if (readbuf)
1740 myaddr = readbuf;
1741 if (writebuf)
1742 myaddr = writebuf;
1743 if (retval > 0 && myaddr != NULL)
1744 {
1745 int i;
1746
1747 fputs_unfiltered (", bytes =", gdb_stdlog);
1748 for (i = 0; i < retval; i++)
1749 {
1750 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1751 {
1752 if (targetdebug < 2 && i > 0)
1753 {
1754 fprintf_unfiltered (gdb_stdlog, " ...");
1755 break;
1756 }
1757 fprintf_unfiltered (gdb_stdlog, "\n");
1758 }
1759
1760 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1761 }
1762 }
1763
1764 fputc_unfiltered ('\n', gdb_stdlog);
1765 }
1766 return retval;
1767 }
1768
1769 /* Read LEN bytes of target memory at address MEMADDR, placing the
1770 results in GDB's memory at MYADDR. Returns either 0 for success or
1771 a target_xfer_error value if any error occurs.
1772
1773 If an error occurs, no guarantee is made about the contents of the data at
1774 MYADDR. In particular, the caller should not depend upon partial reads
1775 filling the buffer with good data. There is no way for the caller to know
1776 how much good data might have been transferred anyway. Callers that can
1777 deal with partial reads should call target_read (which will retry until
1778 it makes no progress, and then return how much was transferred). */
1779
1780 int
1781 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1782 {
1783 /* Dispatch to the topmost target, not the flattened current_target.
1784 Memory accesses check target->to_has_(all_)memory, and the
1785 flattened target doesn't inherit those. */
1786 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1787 myaddr, memaddr, len) == len)
1788 return 0;
1789 else
1790 return TARGET_XFER_E_IO;
1791 }
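
/* Illustrative sketch, not part of target.c: the all-or-nothing
   contract of target_read_memory.  The function name and address are
   made up.  */
#if 0
static void
example_check_read (void)
{
  CORE_ADDR addr = 0x2000;	/* Hypothetical address.  */
  gdb_byte buf[8];

  if (target_read_memory (addr, buf, sizeof buf) != 0)
    error (_("Cannot read memory at %s"), core_addr_to_string_nz (addr));

  /* On success all of BUF is valid; on failure none of it is
     guaranteed, so callers that can use partial results should call
     target_read instead.  */
}
#endif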
1792
1793 /* Like target_read_memory, but specify explicitly that this is a read
1794 from the target's raw memory. That is, this read bypasses the
1795 dcache, breakpoint shadowing, etc. */
1796
1797 int
1798 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1799 {
1800 /* See comment in target_read_memory about why the request starts at
1801 current_target.beneath. */
1802 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1803 myaddr, memaddr, len) == len)
1804 return 0;
1805 else
1806 return TARGET_XFER_E_IO;
1807 }
1808
1809 /* Like target_read_memory, but specify explicitly that this is a read from
1810 the target's stack. This may trigger different cache behavior. */
1811
1812 int
1813 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1814 {
1815 /* See comment in target_read_memory about why the request starts at
1816 current_target.beneath. */
1817 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1818 myaddr, memaddr, len) == len)
1819 return 0;
1820 else
1821 return TARGET_XFER_E_IO;
1822 }
1823
1824 /* Like target_read_memory, but specify explicitly that this is a read from
1825 the target's code. This may trigger different cache behavior. */
1826
1827 int
1828 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1829 {
1830 /* See comment in target_read_memory about why the request starts at
1831 current_target.beneath. */
1832 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1833 myaddr, memaddr, len) == len)
1834 return 0;
1835 else
1836 return TARGET_XFER_E_IO;
1837 }
1838
1839 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1840 Returns either 0 for success or a target_xfer_error value if any
1841 error occurs. If an error occurs, no guarantee is made about how
1842 much data got written. Callers that can deal with partial writes
1843 should call target_write. */
1844
1845 int
1846 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1847 {
1848 /* See comment in target_read_memory about why the request starts at
1849 current_target.beneath. */
1850 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1851 myaddr, memaddr, len) == len)
1852 return 0;
1853 else
1854 return TARGET_XFER_E_IO;
1855 }
1856
1857 /* Write LEN bytes from MYADDR to target raw memory at address
1858 MEMADDR. Returns either 0 for success or a target_xfer_error value
1859 if any error occurs. If an error occurs, no guarantee is made
1860 about how much data got written. Callers that can deal with
1861 partial writes should call target_write. */
1862
1863 int
1864 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1865 {
1866 /* See comment in target_read_memory about why the request starts at
1867 current_target.beneath. */
1868 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1869 myaddr, memaddr, len) == len)
1870 return 0;
1871 else
1872 return TARGET_XFER_E_IO;
1873 }
1874
1875 /* Fetch the target's memory map. */
1876
1877 VEC(mem_region_s) *
1878 target_memory_map (void)
1879 {
1880 VEC(mem_region_s) *result;
1881 struct mem_region *last_one, *this_one;
1882 int ix;
1883 struct target_ops *t;
1884
1885 if (targetdebug)
1886 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1887
1888 for (t = current_target.beneath; t != NULL; t = t->beneath)
1889 if (t->to_memory_map != NULL)
1890 break;
1891
1892 if (t == NULL)
1893 return NULL;
1894
1895 result = t->to_memory_map (t);
1896 if (result == NULL)
1897 return NULL;
1898
1899 qsort (VEC_address (mem_region_s, result),
1900 VEC_length (mem_region_s, result),
1901 sizeof (struct mem_region), mem_region_cmp);
1902
1903 /* Check that regions do not overlap. Simultaneously assign
1904 a numbering for the "mem" commands to use to refer to
1905 each region. */
1906 last_one = NULL;
1907 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1908 {
1909 this_one->number = ix;
1910
1911 if (last_one && last_one->hi > this_one->lo)
1912 {
1913 warning (_("Overlapping regions in memory map: ignoring"));
1914 VEC_free (mem_region_s, result);
1915 return NULL;
1916 }
1917 last_one = this_one;
1918 }
1919
1920 return result;
1921 }
1922
1923 void
1924 target_flash_erase (ULONGEST address, LONGEST length)
1925 {
1926 struct target_ops *t;
1927
1928 for (t = current_target.beneath; t != NULL; t = t->beneath)
1929 if (t->to_flash_erase != NULL)
1930 {
1931 if (targetdebug)
1932 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1933 hex_string (address), phex (length, 0));
1934 t->to_flash_erase (t, address, length);
1935 return;
1936 }
1937
1938 tcomplain ();
1939 }
1940
1941 void
1942 target_flash_done (void)
1943 {
1944 struct target_ops *t;
1945
1946 for (t = current_target.beneath; t != NULL; t = t->beneath)
1947 if (t->to_flash_done != NULL)
1948 {
1949 if (targetdebug)
1950 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1951 t->to_flash_done (t);
1952 return;
1953 }
1954
1955 tcomplain ();
1956 }
1957
1958 static void
1959 show_trust_readonly (struct ui_file *file, int from_tty,
1960 struct cmd_list_element *c, const char *value)
1961 {
1962 fprintf_filtered (file,
1963 _("Mode for reading from readonly sections is %s.\n"),
1964 value);
1965 }
1966
1967 /* More generic transfers. */
1968
1969 static LONGEST
1970 default_xfer_partial (struct target_ops *ops, enum target_object object,
1971 const char *annex, gdb_byte *readbuf,
1972 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len)
1973 {
1974 if (object == TARGET_OBJECT_MEMORY
1975 && ops->deprecated_xfer_memory != NULL)
1976 /* If available, fall back to the target's
1977 "deprecated_xfer_memory" method. */
1978 {
1979 int xfered = -1;
1980
1981 errno = 0;
1982 if (writebuf != NULL)
1983 {
1984 void *buffer = xmalloc (len);
1985 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1986
1987 memcpy (buffer, writebuf, len);
1988 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1989 1/*write*/, NULL, ops);
1990 do_cleanups (cleanup);
1991 }
1992 if (readbuf != NULL)
1993 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1994 0/*read*/, NULL, ops);
1995 if (xfered > 0)
1996 return xfered;
1997 else if (xfered == 0 && errno == 0)
1998 /* "deprecated_xfer_memory" uses 0, cross checked against
1999 ERRNO as one indication of an error. */
2000 return 0;
2001 else
2002 return -1;
2003 }
2004 else if (ops->beneath != NULL)
2005 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2006 readbuf, writebuf, offset, len);
2007 else
2008 return -1;
2009 }
2010
2011 /* The xfer_partial handler for the topmost target. Unlike the default,
2012 it does not need to handle memory specially; it just passes all
2013 requests down the stack. */
2014
2015 static LONGEST
2016 current_xfer_partial (struct target_ops *ops, enum target_object object,
2017 const char *annex, gdb_byte *readbuf,
2018 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len)
2019 {
2020 if (ops->beneath != NULL)
2021 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2022 readbuf, writebuf, offset, len);
2023 else
2024 return -1;
2025 }
2026
2027 /* Target vector read/write partial wrapper functions. */
2028
2029 static LONGEST
2030 target_read_partial (struct target_ops *ops,
2031 enum target_object object,
2032 const char *annex, gdb_byte *buf,
2033 ULONGEST offset, LONGEST len)
2034 {
2035 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
2036 }
2037
2038 static LONGEST
2039 target_write_partial (struct target_ops *ops,
2040 enum target_object object,
2041 const char *annex, const gdb_byte *buf,
2042 ULONGEST offset, LONGEST len)
2043 {
2044 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
2045 }
2046
2047 /* Wrappers to perform the full transfer. */
2048
2049 /* For docs on target_read see target.h. */
2050
2051 LONGEST
2052 target_read (struct target_ops *ops,
2053 enum target_object object,
2054 const char *annex, gdb_byte *buf,
2055 ULONGEST offset, LONGEST len)
2056 {
2057 LONGEST xfered = 0;
2058
2059 while (xfered < len)
2060 {
2061 LONGEST xfer = target_read_partial (ops, object, annex,
2062 (gdb_byte *) buf + xfered,
2063 offset + xfered, len - xfered);
2064
2065 /* Call an observer, notifying them of the xfer progress? */
2066 if (xfer == 0)
2067 return xfered;
2068 if (xfer < 0)
2069 return -1;
2070 xfered += xfer;
2071 QUIT;
2072 }
2073 return len;
2074 }
2075
2076 /* Assuming that the entire [begin, end) range of memory cannot be
2077 read, try to read whatever subrange is possible to read.
2078
2079 The function returns, in RESULT, either zero or one memory block.
2080 If there's a readable subrange at the beginning, it is completely
2081 read and returned. Any further readable subrange will not be read.
2082 Otherwise, if there's a readable subrange at the end, it will be
2083 completely read and returned. Any readable subranges before it
2084 (obviously, not starting at the beginning) will be ignored. In
2085 other cases -- either no readable subrange, or readable subrange(s)
2086 that are neither at the beginning nor at the end -- nothing is returned.
2087
2088 The purpose of this function is to handle a read across a boundary
2089 of accessible memory in the case when a memory map is not available.
2090 The above restrictions are fine for this case, but will give
2091 incorrect results if the memory is 'patchy'. However, supporting
2092 'patchy' memory would require trying to read every single byte,
2093 and that seems an unacceptable solution. An explicit memory map is
2094 recommended for this case -- and read_memory_robust will
2095 take care of reading multiple ranges then. */
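/* A worked example (illustrative only): suppose [BEGIN, END) is
   [0x1000, 0x1010) and only [0x1000, 0x1008) is actually readable.
   The first byte reads fine, so the code walks forward and keeps
   bisecting the failing remainder; CURRENT_BEGIN converges on 0x1008,
   the loop exits, and the single block [0x1000, 0x1008) is pushed on
   RESULT.  If instead only a tail of the range were readable, the
   same bisection runs backwards and returns the block ending at END.  */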
2096
2097 static void
2098 read_whatever_is_readable (struct target_ops *ops,
2099 ULONGEST begin, ULONGEST end,
2100 VEC(memory_read_result_s) **result)
2101 {
2102 gdb_byte *buf = xmalloc (end - begin);
2103 ULONGEST current_begin = begin;
2104 ULONGEST current_end = end;
2105 int forward;
2106 memory_read_result_s r;
2107
2108 /* If we previously failed to read 1 byte, nothing can be done here. */
2109 if (end - begin <= 1)
2110 {
2111 xfree (buf);
2112 return;
2113 }
2114
2115 /* Check that either the first or the last byte is readable, and give up
2116 if not. This heuristic is meant to permit reading accessible memory
2117 at the boundary of an accessible region. */
2118 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2119 buf, begin, 1) == 1)
2120 {
2121 forward = 1;
2122 ++current_begin;
2123 }
2124 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2125 buf + (end-begin) - 1, end - 1, 1) == 1)
2126 {
2127 forward = 0;
2128 --current_end;
2129 }
2130 else
2131 {
2132 xfree (buf);
2133 return;
2134 }
2135
2136 /* Loop invariant is that the [current_begin, current_end) was previously
2137 found to be not readable as a whole.
2138
2139 Note loop condition -- if the range has 1 byte, we can't divide the range
2140 so there's no point trying further. */
2141 while (current_end - current_begin > 1)
2142 {
2143 ULONGEST first_half_begin, first_half_end;
2144 ULONGEST second_half_begin, second_half_end;
2145 LONGEST xfer;
2146 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2147
2148 if (forward)
2149 {
2150 first_half_begin = current_begin;
2151 first_half_end = middle;
2152 second_half_begin = middle;
2153 second_half_end = current_end;
2154 }
2155 else
2156 {
2157 first_half_begin = middle;
2158 first_half_end = current_end;
2159 second_half_begin = current_begin;
2160 second_half_end = middle;
2161 }
2162
2163 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2164 buf + (first_half_begin - begin),
2165 first_half_begin,
2166 first_half_end - first_half_begin);
2167
2168 if (xfer == first_half_end - first_half_begin)
2169 {
2170 /* This half reads fine, so the error must be in the
2171 other half. */
2172 current_begin = second_half_begin;
2173 current_end = second_half_end;
2174 }
2175 else
2176 {
2177 /* This half is not readable. Because we've tried one byte, we
2178 know some part of this half is actually readable. Go to the next
2179 iteration to divide again and try to read.
2180
2181 We don't handle the other half, because this function only tries
2182 to read a single readable subrange. */
2183 current_begin = first_half_begin;
2184 current_end = first_half_end;
2185 }
2186 }
2187
2188 if (forward)
2189 {
2190 /* The [begin, current_begin) range has been read. */
2191 r.begin = begin;
2192 r.end = current_begin;
2193 r.data = buf;
2194 }
2195 else
2196 {
2197 /* The [current_end, end) range has been read. */
2198 LONGEST rlen = end - current_end;
2199
2200 r.data = xmalloc (rlen);
2201 memcpy (r.data, buf + current_end - begin, rlen);
2202 r.begin = current_end;
2203 r.end = end;
2204 xfree (buf);
2205 }
2206 VEC_safe_push (memory_read_result_s, (*result), &r);
2207 }
2208
2209 void
2210 free_memory_read_result_vector (void *x)
2211 {
2212 VEC(memory_read_result_s) *v = x;
2213 memory_read_result_s *current;
2214 int ix;
2215
2216 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2217 {
2218 xfree (current->data);
2219 }
2220 VEC_free (memory_read_result_s, v);
2221 }
2222
2223 VEC(memory_read_result_s) *
2224 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2225 {
2226 VEC(memory_read_result_s) *result = 0;
2227
2228 LONGEST xfered = 0;
2229 while (xfered < len)
2230 {
2231 struct mem_region *region = lookup_mem_region (offset + xfered);
2232 LONGEST rlen;
2233
2234 /* If there is no explicit region, a fake one should be created. */
2235 gdb_assert (region);
2236
2237 if (region->hi == 0)
2238 rlen = len - xfered;
2239 else
2240 rlen = region->hi - offset;
2241
2242 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2243 {
2244 /* Cannot read this region. Note that we can end up here only
2245 if the region is explicitly marked inaccessible, or
2246 'inaccessible-by-default' is in effect. */
2247 xfered += rlen;
2248 }
2249 else
2250 {
2251 LONGEST to_read = min (len - xfered, rlen);
2252 gdb_byte *buffer = (gdb_byte *) xmalloc (to_read);
2253
2254 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2255 (gdb_byte *) buffer,
2256 offset + xfered, to_read);
2257 /* Call an observer, notifying them of the xfer progress? */
2258 if (xfer <= 0)
2259 {
2260 /* Got an error reading full chunk. See if maybe we can read
2261 some subrange. */
2262 xfree (buffer);
2263 read_whatever_is_readable (ops, offset + xfered,
2264 offset + xfered + to_read, &result);
2265 xfered += to_read;
2266 }
2267 else
2268 {
2269 struct memory_read_result r;
2270 r.data = buffer;
2271 r.begin = offset + xfered;
2272 r.end = r.begin + xfer;
2273 VEC_safe_push (memory_read_result_s, result, &r);
2274 xfered += xfer;
2275 }
2276 QUIT;
2277 }
2278 }
2279 return result;
2280 }
2281
2282
2283 /* An alternative to target_write with progress callbacks. */
2284
2285 LONGEST
2286 target_write_with_progress (struct target_ops *ops,
2287 enum target_object object,
2288 const char *annex, const gdb_byte *buf,
2289 ULONGEST offset, LONGEST len,
2290 void (*progress) (ULONGEST, void *), void *baton)
2291 {
2292 LONGEST xfered = 0;
2293
2294 /* Give the progress callback a chance to set up. */
2295 if (progress)
2296 (*progress) (0, baton);
2297
2298 while (xfered < len)
2299 {
2300 LONGEST xfer = target_write_partial (ops, object, annex,
2301 (gdb_byte *) buf + xfered,
2302 offset + xfered, len - xfered);
2303
2304 if (xfer == 0)
2305 return xfered;
2306 if (xfer < 0)
2307 return -1;
2308
2309 if (progress)
2310 (*progress) (xfer, baton);
2311
2312 xfered += xfer;
2313 QUIT;
2314 }
2315 return len;
2316 }
2317
2318 /* For docs on target_write see target.h. */
2319
2320 LONGEST
2321 target_write (struct target_ops *ops,
2322 enum target_object object,
2323 const char *annex, const gdb_byte *buf,
2324 ULONGEST offset, LONGEST len)
2325 {
2326 return target_write_with_progress (ops, object, annex, buf, offset, len,
2327 NULL, NULL);
2328 }
2329
2330 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2331 the size of the transferred data. PADDING additional bytes are
2332 available in *BUF_P. This is a helper function for
2333 target_read_alloc; see the declaration of that function for more
2334 information. */
2335
2336 static LONGEST
2337 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2338 const char *annex, gdb_byte **buf_p, int padding)
2339 {
2340 size_t buf_alloc, buf_pos;
2341 gdb_byte *buf;
2342 LONGEST n;
2343
2344 /* This function does not have a length parameter; it reads the
2345 entire OBJECT. Also, it doesn't support objects fetched partly
2346 from one target and partly from another (in a different stratum,
2347 e.g. a core file and an executable). Both reasons make it
2348 unsuitable for reading memory. */
2349 gdb_assert (object != TARGET_OBJECT_MEMORY);
2350
2351 /* Start by reading up to 4K at a time. The target will throttle
2352 this number down if necessary. */
2353 buf_alloc = 4096;
2354 buf = xmalloc (buf_alloc);
2355 buf_pos = 0;
2356 while (1)
2357 {
2358 n = target_read_partial (ops, object, annex, &buf[buf_pos],
2359 buf_pos, buf_alloc - buf_pos - padding);
2360 if (n < 0)
2361 {
2362 /* An error occurred. */
2363 xfree (buf);
2364 return -1;
2365 }
2366 else if (n == 0)
2367 {
2368 /* Read all there was. */
2369 if (buf_pos == 0)
2370 xfree (buf);
2371 else
2372 *buf_p = buf;
2373 return buf_pos;
2374 }
2375
2376 buf_pos += n;
2377
2378 /* If the buffer is filling up, expand it. */
2379 if (buf_alloc < buf_pos * 2)
2380 {
2381 buf_alloc *= 2;
2382 buf = xrealloc (buf, buf_alloc);
2383 }
2384
2385 QUIT;
2386 }
2387 }
2388
2389 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2390 the size of the transferred data. See the declaration in "target.h"
2391 for more information about the return value. */
2392
2393 LONGEST
2394 target_read_alloc (struct target_ops *ops, enum target_object object,
2395 const char *annex, gdb_byte **buf_p)
2396 {
2397 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2398 }
2399
2400 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2401 returned as a string, allocated using xmalloc. If an error occurs
2402 or the transfer is unsupported, NULL is returned. Empty objects
2403 are returned as allocated but empty strings. A warning is issued
2404 if the result contains any embedded NUL bytes. */
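/* For instance, target_get_osdata below uses this as

     return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);

   and callers are expected to xfree the returned string (when it is
   not NULL) once they are done with it.  */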
2405
2406 char *
2407 target_read_stralloc (struct target_ops *ops, enum target_object object,
2408 const char *annex)
2409 {
2410 gdb_byte *buffer;
2411 char *bufstr;
2412 LONGEST i, transferred;
2413
2414 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2415 bufstr = (char *) buffer;
2416
2417 if (transferred < 0)
2418 return NULL;
2419
2420 if (transferred == 0)
2421 return xstrdup ("");
2422
2423 bufstr[transferred] = 0;
2424
2425 /* Check for embedded NUL bytes; but allow trailing NULs. */
2426 for (i = strlen (bufstr); i < transferred; i++)
2427 if (bufstr[i] != 0)
2428 {
2429 warning (_("target object %d, annex %s, "
2430 "contained unexpected null characters"),
2431 (int) object, annex ? annex : "(none)");
2432 break;
2433 }
2434
2435 return bufstr;
2436 }
2437
2438 /* Memory transfer methods. */
2439
2440 void
2441 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2442 LONGEST len)
2443 {
2444 /* This method is used to read from an alternate, non-current
2445 target. This read must bypass the overlay support (as symbols
2446 don't match this target), and GDB's internal cache (wrong cache
2447 for this target). */
2448 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2449 != len)
2450 memory_error (TARGET_XFER_E_IO, addr);
2451 }
2452
2453 ULONGEST
2454 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2455 int len, enum bfd_endian byte_order)
2456 {
2457 gdb_byte buf[sizeof (ULONGEST)];
2458
2459 gdb_assert (len <= sizeof (buf));
2460 get_target_memory (ops, addr, buf, len);
2461 return extract_unsigned_integer (buf, len, byte_order);
2462 }
2463
2464 /* See target.h. */
2465
2466 int
2467 forward_target_insert_breakpoint (struct target_ops *ops,
2468 struct gdbarch *gdbarch,
2469 struct bp_target_info *bp_tgt)
2470 {
2471 for (; ops != NULL; ops = ops->beneath)
2472 if (ops->to_insert_breakpoint != NULL)
2473 return ops->to_insert_breakpoint (ops, gdbarch, bp_tgt);
2474
2475 return memory_insert_breakpoint (ops, gdbarch, bp_tgt);
2476 }
2477
2478 /* See target.h. */
2479
2480 int
2481 target_insert_breakpoint (struct gdbarch *gdbarch,
2482 struct bp_target_info *bp_tgt)
2483 {
2484 if (!may_insert_breakpoints)
2485 {
2486 warning (_("May not insert breakpoints"));
2487 return 1;
2488 }
2489
2490 return forward_target_insert_breakpoint (&current_target, gdbarch, bp_tgt);
2491 }
2492
2493 /* See target.h. */
2494
2495 int
2496 forward_target_remove_breakpoint (struct target_ops *ops,
2497 struct gdbarch *gdbarch,
2498 struct bp_target_info *bp_tgt)
2499 {
2500 /* This is kind of a weird case to handle, but the permission might
2501 have been changed after breakpoints were inserted - in which case
2502 we should just take the user literally and assume that any
2503 breakpoints should be left in place. */
2504 if (!may_insert_breakpoints)
2505 {
2506 warning (_("May not remove breakpoints"));
2507 return 1;
2508 }
2509
2510 for (; ops != NULL; ops = ops->beneath)
2511 if (ops->to_remove_breakpoint != NULL)
2512 return ops->to_remove_breakpoint (ops, gdbarch, bp_tgt);
2513
2514 return memory_remove_breakpoint (ops, gdbarch, bp_tgt);
2515 }
2516
2517 /* See target.h. */
2518
2519 int
2520 target_remove_breakpoint (struct gdbarch *gdbarch,
2521 struct bp_target_info *bp_tgt)
2522 {
2523 return forward_target_remove_breakpoint (&current_target, gdbarch, bp_tgt);
2524 }
2525
2526 static void
2527 target_info (char *args, int from_tty)
2528 {
2529 struct target_ops *t;
2530 int has_all_mem = 0;
2531
2532 if (symfile_objfile != NULL)
2533 printf_unfiltered (_("Symbols from \"%s\".\n"),
2534 objfile_name (symfile_objfile));
2535
2536 for (t = target_stack; t != NULL; t = t->beneath)
2537 {
2538 if (!(*t->to_has_memory) (t))
2539 continue;
2540
2541 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2542 continue;
2543 if (has_all_mem)
2544 printf_unfiltered (_("\tWhile running this, "
2545 "GDB does not access memory from...\n"));
2546 printf_unfiltered ("%s:\n", t->to_longname);
2547 (t->to_files_info) (t);
2548 has_all_mem = (*t->to_has_all_memory) (t);
2549 }
2550 }
2551
2552 /* This function is called before any new inferior is created, e.g.
2553 by running a program, attaching, or connecting to a target.
2554 It cleans up any state from previous invocations which might
2555 change between runs. This is a subset of what target_preopen
2556 resets (things which might change between targets). */
2557
2558 void
2559 target_pre_inferior (int from_tty)
2560 {
2561 /* Clear out solib state. Otherwise the solib state of the previous
2562 inferior might have survived and be entirely wrong for the new
2563 target. This has been observed on GNU/Linux using glibc 2.3. How
2564 to reproduce:
2565
2566 bash$ ./foo&
2567 [1] 4711
2568 bash$ ./foo&
2569 [1] 4712
2570 bash$ gdb ./foo
2571 [...]
2572 (gdb) attach 4711
2573 (gdb) detach
2574 (gdb) attach 4712
2575 Cannot access memory at address 0xdeadbeef
2576 */
2577
2578 /* In some OSs, the shared library list is the same/global/shared
2579 across inferiors. If code is shared between processes, so are
2580 memory regions and features. */
2581 if (!gdbarch_has_global_solist (target_gdbarch ()))
2582 {
2583 no_shared_libraries (NULL, from_tty);
2584
2585 invalidate_target_mem_regions ();
2586
2587 target_clear_description ();
2588 }
2589
2590 agent_capability_invalidate ();
2591 }
2592
2593 /* Callback for iterate_over_inferiors. Gets rid of the given
2594 inferior. */
2595
2596 static int
2597 dispose_inferior (struct inferior *inf, void *args)
2598 {
2599 struct thread_info *thread;
2600
2601 thread = any_thread_of_process (inf->pid);
2602 if (thread)
2603 {
2604 switch_to_thread (thread->ptid);
2605
2606 /* Core inferiors actually should be detached, not killed. */
2607 if (target_has_execution)
2608 target_kill ();
2609 else
2610 target_detach (NULL, 0);
2611 }
2612
2613 return 0;
2614 }
2615
2616 /* This is to be called by the open routine before it does
2617 anything. */
2618
2619 void
2620 target_preopen (int from_tty)
2621 {
2622 dont_repeat ();
2623
2624 if (have_inferiors ())
2625 {
2626 if (!from_tty
2627 || !have_live_inferiors ()
2628 || query (_("A program is being debugged already. Kill it? ")))
2629 iterate_over_inferiors (dispose_inferior, NULL);
2630 else
2631 error (_("Program not killed."));
2632 }
2633
2634 /* Calling target_kill may remove the target from the stack. But if
2635 it doesn't (which seems like a win for UDI), remove it now. */
2636 /* Leave the exec target, though. The user may be switching from a
2637 live process to a core of the same program. */
2638 pop_all_targets_above (file_stratum);
2639
2640 target_pre_inferior (from_tty);
2641 }
2642
2643 /* Detach a target after doing deferred register stores. */
2644
2645 void
2646 target_detach (const char *args, int from_tty)
2647 {
2648 struct target_ops *t;
2649
2650 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2651 /* Don't remove global breakpoints here. They're removed on
2652 disconnection from the target. */
2653 ;
2654 else
2655 /* If we're in breakpoints-always-inserted mode, have to remove
2656 them before detaching. */
2657 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2658
2659 prepare_for_detach ();
2660
2661 for (t = current_target.beneath; t != NULL; t = t->beneath)
2662 {
2663 if (t->to_detach != NULL)
2664 {
2665 t->to_detach (t, args, from_tty);
2666 if (targetdebug)
2667 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2668 args, from_tty);
2669 return;
2670 }
2671 }
2672
2673 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2674 }
2675
2676 void
2677 target_disconnect (char *args, int from_tty)
2678 {
2679 struct target_ops *t;
2680
2681 /* If we're in breakpoints-always-inserted mode or if breakpoints
2682 are global across processes, we have to remove them before
2683 disconnecting. */
2684 remove_breakpoints ();
2685
2686 for (t = current_target.beneath; t != NULL; t = t->beneath)
2687 if (t->to_disconnect != NULL)
2688 {
2689 if (targetdebug)
2690 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2691 args, from_tty);
2692 t->to_disconnect (t, args, from_tty);
2693 return;
2694 }
2695
2696 tcomplain ();
2697 }
2698
2699 ptid_t
2700 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2701 {
2702 struct target_ops *t;
2703
2704 for (t = current_target.beneath; t != NULL; t = t->beneath)
2705 {
2706 if (t->to_wait != NULL)
2707 {
2708 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2709
2710 if (targetdebug)
2711 {
2712 char *status_string;
2713 char *options_string;
2714
2715 status_string = target_waitstatus_to_string (status);
2716 options_string = target_options_to_string (options);
2717 fprintf_unfiltered (gdb_stdlog,
2718 "target_wait (%d, status, options={%s})"
2719 " = %d, %s\n",
2720 ptid_get_pid (ptid), options_string,
2721 ptid_get_pid (retval), status_string);
2722 xfree (status_string);
2723 xfree (options_string);
2724 }
2725
2726 return retval;
2727 }
2728 }
2729
2730 noprocess ();
2731 }
2732
2733 char *
2734 target_pid_to_str (ptid_t ptid)
2735 {
2736 struct target_ops *t;
2737
2738 for (t = current_target.beneath; t != NULL; t = t->beneath)
2739 {
2740 if (t->to_pid_to_str != NULL)
2741 return (*t->to_pid_to_str) (t, ptid);
2742 }
2743
2744 return normal_pid_to_str (ptid);
2745 }
2746
2747 char *
2748 target_thread_name (struct thread_info *info)
2749 {
2750 struct target_ops *t;
2751
2752 for (t = current_target.beneath; t != NULL; t = t->beneath)
2753 {
2754 if (t->to_thread_name != NULL)
2755 return (*t->to_thread_name) (info);
2756 }
2757
2758 return NULL;
2759 }
2760
2761 void
2762 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2763 {
2764 struct target_ops *t;
2765
2766 target_dcache_invalidate ();
2767
2768 for (t = current_target.beneath; t != NULL; t = t->beneath)
2769 {
2770 if (t->to_resume != NULL)
2771 {
2772 t->to_resume (t, ptid, step, signal);
2773 if (targetdebug)
2774 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2775 ptid_get_pid (ptid),
2776 step ? "step" : "continue",
2777 gdb_signal_to_name (signal));
2778
2779 registers_changed_ptid (ptid);
2780 set_executing (ptid, 1);
2781 set_running (ptid, 1);
2782 clear_inline_frame_state (ptid);
2783 return;
2784 }
2785 }
2786
2787 noprocess ();
2788 }
2789
2790 void
2791 target_pass_signals (int numsigs, unsigned char *pass_signals)
2792 {
2793 struct target_ops *t;
2794
2795 for (t = current_target.beneath; t != NULL; t = t->beneath)
2796 {
2797 if (t->to_pass_signals != NULL)
2798 {
2799 if (targetdebug)
2800 {
2801 int i;
2802
2803 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2804 numsigs);
2805
2806 for (i = 0; i < numsigs; i++)
2807 if (pass_signals[i])
2808 fprintf_unfiltered (gdb_stdlog, " %s",
2809 gdb_signal_to_name (i));
2810
2811 fprintf_unfiltered (gdb_stdlog, " })\n");
2812 }
2813
2814 (*t->to_pass_signals) (numsigs, pass_signals);
2815 return;
2816 }
2817 }
2818 }
2819
2820 void
2821 target_program_signals (int numsigs, unsigned char *program_signals)
2822 {
2823 struct target_ops *t;
2824
2825 for (t = current_target.beneath; t != NULL; t = t->beneath)
2826 {
2827 if (t->to_program_signals != NULL)
2828 {
2829 if (targetdebug)
2830 {
2831 int i;
2832
2833 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2834 numsigs);
2835
2836 for (i = 0; i < numsigs; i++)
2837 if (program_signals[i])
2838 fprintf_unfiltered (gdb_stdlog, " %s",
2839 gdb_signal_to_name (i));
2840
2841 fprintf_unfiltered (gdb_stdlog, " })\n");
2842 }
2843
2844 (*t->to_program_signals) (numsigs, program_signals);
2845 return;
2846 }
2847 }
2848 }
2849
2850 /* Look through the list of possible targets for a target that can
2851 follow forks. */
2852
2853 int
2854 target_follow_fork (int follow_child, int detach_fork)
2855 {
2856 struct target_ops *t;
2857
2858 for (t = current_target.beneath; t != NULL; t = t->beneath)
2859 {
2860 if (t->to_follow_fork != NULL)
2861 {
2862 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2863
2864 if (targetdebug)
2865 fprintf_unfiltered (gdb_stdlog,
2866 "target_follow_fork (%d, %d) = %d\n",
2867 follow_child, detach_fork, retval);
2868 return retval;
2869 }
2870 }
2871
2872 /* Some target returned a fork event, but did not know how to follow it. */
2873 internal_error (__FILE__, __LINE__,
2874 _("could not find a target to follow fork"));
2875 }
2876
2877 void
2878 target_mourn_inferior (void)
2879 {
2880 struct target_ops *t;
2881
2882 for (t = current_target.beneath; t != NULL; t = t->beneath)
2883 {
2884 if (t->to_mourn_inferior != NULL)
2885 {
2886 t->to_mourn_inferior (t);
2887 if (targetdebug)
2888 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2889
2890 /* We no longer need to keep handles on any of the object files.
2891 Make sure to release them to avoid unnecessarily locking any
2892 of them while we're not actually debugging. */
2893 bfd_cache_close_all ();
2894
2895 return;
2896 }
2897 }
2898
2899 internal_error (__FILE__, __LINE__,
2900 _("could not find a target to follow mourn inferior"));
2901 }
2902
2903 /* Look for a target which can describe architectural features, starting
2904 from TARGET. If we find one, return its description. */
2905
2906 const struct target_desc *
2907 target_read_description (struct target_ops *target)
2908 {
2909 struct target_ops *t;
2910
2911 for (t = target; t != NULL; t = t->beneath)
2912 if (t->to_read_description != NULL)
2913 {
2914 const struct target_desc *tdesc;
2915
2916 tdesc = t->to_read_description (t);
2917 if (tdesc)
2918 return tdesc;
2919 }
2920
2921 return NULL;
2922 }
2923
2924 /* The default implementation of to_search_memory.
2925 This implements a basic search of memory, reading target memory and
2926 performing the search here (as opposed to performing the search on the
2927 target side with, for example, gdbserver). */
2928
2929 int
2930 simple_search_memory (struct target_ops *ops,
2931 CORE_ADDR start_addr, ULONGEST search_space_len,
2932 const gdb_byte *pattern, ULONGEST pattern_len,
2933 CORE_ADDR *found_addrp)
2934 {
2935 /* NOTE: also defined in find.c testcase. */
2936 #define SEARCH_CHUNK_SIZE 16000
2937 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2938 /* Buffer to hold memory contents for searching. */
2939 gdb_byte *search_buf;
2940 unsigned search_buf_size;
2941 struct cleanup *old_cleanups;
2942
2943 search_buf_size = chunk_size + pattern_len - 1;
2944
2945 /* No point in trying to allocate a buffer larger than the search space. */
2946 if (search_space_len < search_buf_size)
2947 search_buf_size = search_space_len;
2948
2949 search_buf = malloc (search_buf_size);
2950 if (search_buf == NULL)
2951 error (_("Unable to allocate memory to perform the search."));
2952 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2953
2954 /* Prime the search buffer. */
2955
2956 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2957 search_buf, start_addr, search_buf_size) != search_buf_size)
2958 {
2959 warning (_("Unable to access %s bytes of target "
2960 "memory at %s, halting search."),
2961 pulongest (search_buf_size), hex_string (start_addr));
2962 do_cleanups (old_cleanups);
2963 return -1;
2964 }
2965
2966 /* Perform the search.
2967
2968 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2969 When we've scanned N bytes we copy the trailing bytes to the start and
2970 read in another N bytes. */
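/* Illustrative arithmetic: with chunk_size == 16000 and a 4-byte
   pattern, search_buf_size is 16003 and keep_len is 3.  After the
   first 16000 bytes have been scanned, the trailing 3 bytes are
   copied to the front of search_buf and the next read starts at
   start_addr + 16003, so a match straddling the chunk boundary is
   still found.  */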
2971
2972 while (search_space_len >= pattern_len)
2973 {
2974 gdb_byte *found_ptr;
2975 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2976
2977 found_ptr = memmem (search_buf, nr_search_bytes,
2978 pattern, pattern_len);
2979
2980 if (found_ptr != NULL)
2981 {
2982 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2983
2984 *found_addrp = found_addr;
2985 do_cleanups (old_cleanups);
2986 return 1;
2987 }
2988
2989 /* Not found in this chunk, skip to next chunk. */
2990
2991 /* Don't let search_space_len wrap here, it's unsigned. */
2992 if (search_space_len >= chunk_size)
2993 search_space_len -= chunk_size;
2994 else
2995 search_space_len = 0;
2996
2997 if (search_space_len >= pattern_len)
2998 {
2999 unsigned keep_len = search_buf_size - chunk_size;
3000 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
3001 int nr_to_read;
3002
3003 /* Copy the trailing part of the previous iteration to the front
3004 of the buffer for the next iteration. */
3005 gdb_assert (keep_len == pattern_len - 1);
3006 memcpy (search_buf, search_buf + chunk_size, keep_len);
3007
3008 nr_to_read = min (search_space_len - keep_len, chunk_size);
3009
3010 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
3011 search_buf + keep_len, read_addr,
3012 nr_to_read) != nr_to_read)
3013 {
3014 warning (_("Unable to access %s bytes of target "
3015 "memory at %s, halting search."),
3016 plongest (nr_to_read),
3017 hex_string (read_addr));
3018 do_cleanups (old_cleanups);
3019 return -1;
3020 }
3021
3022 start_addr += chunk_size;
3023 }
3024 }
3025
3026 /* Not found. */
3027
3028 do_cleanups (old_cleanups);
3029 return 0;
3030 }
3031
3032 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
3033 sequence of bytes in PATTERN with length PATTERN_LEN.
3034
3035 The result is 1 if found, 0 if not found, and -1 if there was an error
3036 requiring halting of the search (e.g. memory read error).
3037 If the pattern is found the address is recorded in FOUND_ADDRP. */
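/* A minimal usage sketch (the pattern and bounds here are made up):

     static const gdb_byte pattern[] = { 0xde, 0xad, 0xbe, 0xef };
     CORE_ADDR found;

     if (target_search_memory (start, space_len, pattern,
                               sizeof pattern, &found) == 1)
       printf_unfiltered ("found at %s\n", hex_string (found));  */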
3038
3039 int
3040 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
3041 const gdb_byte *pattern, ULONGEST pattern_len,
3042 CORE_ADDR *found_addrp)
3043 {
3044 struct target_ops *t;
3045 int found;
3046
3047 /* We don't use INHERIT to set current_target.to_search_memory,
3048 so we have to scan the target stack and handle targetdebug
3049 ourselves. */
3050
3051 if (targetdebug)
3052 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3053 hex_string (start_addr));
3054
3055 for (t = current_target.beneath; t != NULL; t = t->beneath)
3056 if (t->to_search_memory != NULL)
3057 break;
3058
3059 if (t != NULL)
3060 {
3061 found = t->to_search_memory (t, start_addr, search_space_len,
3062 pattern, pattern_len, found_addrp);
3063 }
3064 else
3065 {
3066 /* If a special version of to_search_memory isn't available, use the
3067 simple version. */
3068 found = simple_search_memory (current_target.beneath,
3069 start_addr, search_space_len,
3070 pattern, pattern_len, found_addrp);
3071 }
3072
3073 if (targetdebug)
3074 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3075
3076 return found;
3077 }
3078
3079 /* Look through the currently pushed targets. If none of them will
3080 be able to restart the currently running process, issue an error
3081 message. */
3082
3083 void
3084 target_require_runnable (void)
3085 {
3086 struct target_ops *t;
3087
3088 for (t = target_stack; t != NULL; t = t->beneath)
3089 {
3090 /* If this target knows how to create a new program, then
3091 assume we will still be able to after killing the current
3092 one. Either killing and mourning will not pop T, or else
3093 find_default_run_target will find it again. */
3094 if (t->to_create_inferior != NULL)
3095 return;
3096
3097 /* Do not worry about thread_stratum targets that cannot
3098 create inferiors. Assume they will be pushed again if
3099 necessary, and continue to the process_stratum. */
3100 if (t->to_stratum == thread_stratum
3101 || t->to_stratum == arch_stratum)
3102 continue;
3103
3104 error (_("The \"%s\" target does not support \"run\". "
3105 "Try \"help target\" or \"continue\"."),
3106 t->to_shortname);
3107 }
3108
3109 /* This function is only called if the target is running. In that
3110 case there should have been a process_stratum target and it
3111 should either know how to create inferiors, or not... */
3112 internal_error (__FILE__, __LINE__, _("No targets found"));
3113 }
3114
3115 /* Look through the list of possible targets for a target that can
3116 execute a run or attach command without any other data. This is
3117 used to locate the default process stratum.
3118
3119 If DO_MESG is not NULL, the result is always valid (error() is
3120 called for errors); else, return NULL on error. */
3121
3122 static struct target_ops *
3123 find_default_run_target (char *do_mesg)
3124 {
3125 struct target_ops **t;
3126 struct target_ops *runable = NULL;
3127 int count;
3128
3129 count = 0;
3130
3131 for (t = target_structs; t < target_structs + target_struct_size;
3132 ++t)
3133 {
3134 if ((*t)->to_can_run && target_can_run (*t))
3135 {
3136 runable = *t;
3137 ++count;
3138 }
3139 }
3140
3141 if (count != 1)
3142 {
3143 if (do_mesg)
3144 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3145 else
3146 return NULL;
3147 }
3148
3149 return runable;
3150 }
3151
3152 void
3153 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3154 {
3155 struct target_ops *t;
3156
3157 t = find_default_run_target ("attach");
3158 (t->to_attach) (t, args, from_tty);
3159 return;
3160 }
3161
3162 void
3163 find_default_create_inferior (struct target_ops *ops,
3164 char *exec_file, char *allargs, char **env,
3165 int from_tty)
3166 {
3167 struct target_ops *t;
3168
3169 t = find_default_run_target ("run");
3170 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3171 return;
3172 }
3173
3174 static int
3175 find_default_can_async_p (void)
3176 {
3177 struct target_ops *t;
3178
3179 /* This may be called before the target is pushed on the stack;
3180 look for the default process stratum. If there's none, gdb isn't
3181 configured with a native debugger, and target remote isn't
3182 connected yet. */
3183 t = find_default_run_target (NULL);
3184 if (t && t->to_can_async_p)
3185 return (t->to_can_async_p) ();
3186 return 0;
3187 }
3188
3189 static int
3190 find_default_is_async_p (void)
3191 {
3192 struct target_ops *t;
3193
3194 /* This may be called before the target is pushed on the stack;
3195 look for the default process stratum. If there's none, gdb isn't
3196 configured with a native debugger, and target remote isn't
3197 connected yet. */
3198 t = find_default_run_target (NULL);
3199 if (t && t->to_is_async_p)
3200 return (t->to_is_async_p) ();
3201 return 0;
3202 }
3203
3204 static int
3205 find_default_supports_non_stop (void)
3206 {
3207 struct target_ops *t;
3208
3209 t = find_default_run_target (NULL);
3210 if (t && t->to_supports_non_stop)
3211 return (t->to_supports_non_stop) ();
3212 return 0;
3213 }
3214
3215 int
3216 target_supports_non_stop (void)
3217 {
3218 struct target_ops *t;
3219
3220 for (t = &current_target; t != NULL; t = t->beneath)
3221 if (t->to_supports_non_stop)
3222 return t->to_supports_non_stop ();
3223
3224 return 0;
3225 }
3226
3227 /* Implement the "info proc" command. */
3228
3229 int
3230 target_info_proc (char *args, enum info_proc_what what)
3231 {
3232 struct target_ops *t;
3233
3234 /* If we're already connected to something that can get us OS
3235 related data, use it. Otherwise, try using the native
3236 target. */
3237 if (current_target.to_stratum >= process_stratum)
3238 t = current_target.beneath;
3239 else
3240 t = find_default_run_target (NULL);
3241
3242 for (; t != NULL; t = t->beneath)
3243 {
3244 if (t->to_info_proc != NULL)
3245 {
3246 t->to_info_proc (t, args, what);
3247
3248 if (targetdebug)
3249 fprintf_unfiltered (gdb_stdlog,
3250 "target_info_proc (\"%s\", %d)\n", args, what);
3251
3252 return 1;
3253 }
3254 }
3255
3256 return 0;
3257 }
3258
3259 static int
3260 find_default_supports_disable_randomization (void)
3261 {
3262 struct target_ops *t;
3263
3264 t = find_default_run_target (NULL);
3265 if (t && t->to_supports_disable_randomization)
3266 return (t->to_supports_disable_randomization) ();
3267 return 0;
3268 }
3269
3270 int
3271 target_supports_disable_randomization (void)
3272 {
3273 struct target_ops *t;
3274
3275 for (t = &current_target; t != NULL; t = t->beneath)
3276 if (t->to_supports_disable_randomization)
3277 return t->to_supports_disable_randomization ();
3278
3279 return 0;
3280 }
3281
3282 char *
3283 target_get_osdata (const char *type)
3284 {
3285 struct target_ops *t;
3286
3287 /* If we're already connected to something that can get us OS
3288 related data, use it. Otherwise, try using the native
3289 target. */
3290 if (current_target.to_stratum >= process_stratum)
3291 t = current_target.beneath;
3292 else
3293 t = find_default_run_target ("get OS data");
3294
3295 if (!t)
3296 return NULL;
3297
3298 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3299 }
3300
3301 /* Determine the current address space of thread PTID. */
3302
3303 struct address_space *
3304 target_thread_address_space (ptid_t ptid)
3305 {
3306 struct address_space *aspace;
3307 struct inferior *inf;
3308 struct target_ops *t;
3309
3310 for (t = current_target.beneath; t != NULL; t = t->beneath)
3311 {
3312 if (t->to_thread_address_space != NULL)
3313 {
3314 aspace = t->to_thread_address_space (t, ptid);
3315 gdb_assert (aspace);
3316
3317 if (targetdebug)
3318 fprintf_unfiltered (gdb_stdlog,
3319 "target_thread_address_space (%s) = %d\n",
3320 target_pid_to_str (ptid),
3321 address_space_num (aspace));
3322 return aspace;
3323 }
3324 }
3325
3326 /* Fall back to the "main" address space of the inferior. */
3327 inf = find_inferior_pid (ptid_get_pid (ptid));
3328
3329 if (inf == NULL || inf->aspace == NULL)
3330 internal_error (__FILE__, __LINE__,
3331 _("Can't determine the current "
3332 "address space of thread %s\n"),
3333 target_pid_to_str (ptid));
3334
3335 return inf->aspace;
3336 }
3337
3338
3339 /* Target file operations. */
3340
3341 static struct target_ops *
3342 default_fileio_target (void)
3343 {
3344 /* If we're already connected to something that can perform
3345 file I/O, use it. Otherwise, try using the native target. */
3346 if (current_target.to_stratum >= process_stratum)
3347 return current_target.beneath;
3348 else
3349 return find_default_run_target ("file I/O");
3350 }
3351
3352 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3353 target file descriptor, or -1 if an error occurs (and set
3354 *TARGET_ERRNO). */
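/* A usage sketch (the file name is hypothetical); this open/pread/close
   sequence is essentially what target_fileio_read_alloc_1 below does:

     int target_errno;
     int fd = target_fileio_open ("/etc/motd", FILEIO_O_RDONLY, 0700,
                                  &target_errno);

     if (fd != -1)
       {
         gdb_byte buf[128];
         int n = target_fileio_pread (fd, buf, sizeof buf, 0,
                                      &target_errno);

         target_fileio_close (fd, &target_errno);
       }
*/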
3355 int
3356 target_fileio_open (const char *filename, int flags, int mode,
3357 int *target_errno)
3358 {
3359 struct target_ops *t;
3360
3361 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3362 {
3363 if (t->to_fileio_open != NULL)
3364 {
3365 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3366
3367 if (targetdebug)
3368 fprintf_unfiltered (gdb_stdlog,
3369 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3370 filename, flags, mode,
3371 fd, fd != -1 ? 0 : *target_errno);
3372 return fd;
3373 }
3374 }
3375
3376 *target_errno = FILEIO_ENOSYS;
3377 return -1;
3378 }
3379
3380 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3381 Return the number of bytes written, or -1 if an error occurs
3382 (and set *TARGET_ERRNO). */
3383 int
3384 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3385 ULONGEST offset, int *target_errno)
3386 {
3387 struct target_ops *t;
3388
3389 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3390 {
3391 if (t->to_fileio_pwrite != NULL)
3392 {
3393 int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
3394 target_errno);
3395
3396 if (targetdebug)
3397 fprintf_unfiltered (gdb_stdlog,
3398 "target_fileio_pwrite (%d,...,%d,%s) "
3399 "= %d (%d)\n",
3400 fd, len, pulongest (offset),
3401 ret, ret != -1 ? 0 : *target_errno);
3402 return ret;
3403 }
3404 }
3405
3406 *target_errno = FILEIO_ENOSYS;
3407 return -1;
3408 }
3409
3410 /* Read up to LEN bytes from FD on the target into READ_BUF.
3411 Return the number of bytes read, or -1 if an error occurs
3412 (and set *TARGET_ERRNO). */
3413 int
3414 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3415 ULONGEST offset, int *target_errno)
3416 {
3417 struct target_ops *t;
3418
3419 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3420 {
3421 if (t->to_fileio_pread != NULL)
3422 {
3423 int ret = t->to_fileio_pread (fd, read_buf, len, offset,
3424 target_errno);
3425
3426 if (targetdebug)
3427 fprintf_unfiltered (gdb_stdlog,
3428 "target_fileio_pread (%d,...,%d,%s) "
3429 "= %d (%d)\n",
3430 fd, len, pulongest (offset),
3431 ret, ret != -1 ? 0 : *target_errno);
3432 return ret;
3433 }
3434 }
3435
3436 *target_errno = FILEIO_ENOSYS;
3437 return -1;
3438 }
3439
3440 /* Close FD on the target. Return 0, or -1 if an error occurs
3441 (and set *TARGET_ERRNO). */
3442 int
3443 target_fileio_close (int fd, int *target_errno)
3444 {
3445 struct target_ops *t;
3446
3447 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3448 {
3449 if (t->to_fileio_close != NULL)
3450 {
3451 int ret = t->to_fileio_close (fd, target_errno);
3452
3453 if (targetdebug)
3454 fprintf_unfiltered (gdb_stdlog,
3455 "target_fileio_close (%d) = %d (%d)\n",
3456 fd, ret, ret != -1 ? 0 : *target_errno);
3457 return ret;
3458 }
3459 }
3460
3461 *target_errno = FILEIO_ENOSYS;
3462 return -1;
3463 }
3464
3465 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3466 occurs (and set *TARGET_ERRNO). */
3467 int
3468 target_fileio_unlink (const char *filename, int *target_errno)
3469 {
3470 struct target_ops *t;
3471
3472 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3473 {
3474 if (t->to_fileio_unlink != NULL)
3475 {
3476 int ret = t->to_fileio_unlink (filename, target_errno);
3477
3478 if (targetdebug)
3479 fprintf_unfiltered (gdb_stdlog,
3480 "target_fileio_unlink (%s) = %d (%d)\n",
3481 filename, ret, ret != -1 ? 0 : *target_errno);
3482 return ret;
3483 }
3484 }
3485
3486 *target_errno = FILEIO_ENOSYS;
3487 return -1;
3488 }
3489
3490 /* Read value of symbolic link FILENAME on the target. Return a
3491 null-terminated string allocated via xmalloc, or NULL if an error
3492 occurs (and set *TARGET_ERRNO). */
3493 char *
3494 target_fileio_readlink (const char *filename, int *target_errno)
3495 {
3496 struct target_ops *t;
3497
3498 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3499 {
3500 if (t->to_fileio_readlink != NULL)
3501 {
3502 char *ret = t->to_fileio_readlink (filename, target_errno);
3503
3504 if (targetdebug)
3505 fprintf_unfiltered (gdb_stdlog,
3506 "target_fileio_readlink (%s) = %s (%d)\n",
3507 filename, ret? ret : "(nil)",
3508 ret? 0 : *target_errno);
3509 return ret;
3510 }
3511 }
3512
3513 *target_errno = FILEIO_ENOSYS;
3514 return NULL;
3515 }
3516
3517 static void
3518 target_fileio_close_cleanup (void *opaque)
3519 {
3520 int fd = *(int *) opaque;
3521 int target_errno;
3522
3523 target_fileio_close (fd, &target_errno);
3524 }
3525
3526 /* Read target file FILENAME. Store the result in *BUF_P and
3527 return the size of the transferred data. PADDING additional bytes are
3528 available in *BUF_P. This is a helper function for
3529 target_fileio_read_alloc; see the declaration of that function for more
3530 information. */
3531
3532 static LONGEST
3533 target_fileio_read_alloc_1 (const char *filename,
3534 gdb_byte **buf_p, int padding)
3535 {
3536 struct cleanup *close_cleanup;
3537 size_t buf_alloc, buf_pos;
3538 gdb_byte *buf;
3539 LONGEST n;
3540 int fd;
3541 int target_errno;
3542
3543 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3544 if (fd == -1)
3545 return -1;
3546
3547 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3548
3549 /* Start by reading up to 4K at a time. The target will throttle
3550 this number down if necessary. */
3551 buf_alloc = 4096;
3552 buf = xmalloc (buf_alloc);
3553 buf_pos = 0;
3554 while (1)
3555 {
3556 n = target_fileio_pread (fd, &buf[buf_pos],
3557 buf_alloc - buf_pos - padding, buf_pos,
3558 &target_errno);
3559 if (n < 0)
3560 {
3561 /* An error occurred. */
3562 do_cleanups (close_cleanup);
3563 xfree (buf);
3564 return -1;
3565 }
3566 else if (n == 0)
3567 {
3568 /* Read all there was. */
3569 do_cleanups (close_cleanup);
3570 if (buf_pos == 0)
3571 xfree (buf);
3572 else
3573 *buf_p = buf;
3574 return buf_pos;
3575 }
3576
3577 buf_pos += n;
3578
3579 /* If the buffer is filling up, expand it. */
3580 if (buf_alloc < buf_pos * 2)
3581 {
3582 buf_alloc *= 2;
3583 buf = xrealloc (buf, buf_alloc);
3584 }
3585
3586 QUIT;
3587 }
3588 }
3589
3590 /* Read target file FILENAME. Store the result in *BUF_P and return
3591 the size of the transferred data. See the declaration in "target.h"
3592 for more information about the return value. */
3593
3594 LONGEST
3595 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3596 {
3597 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3598 }
3599
3600 /* Read target file FILENAME. The result is NUL-terminated and
3601 returned as a string, allocated using xmalloc. If an error occurs
3602 or the transfer is unsupported, NULL is returned. Empty objects
3603 are returned as allocated but empty strings. A warning is issued
3604 if the result contains any embedded NUL bytes. */
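/* E.g., a hypothetical caller reading a text file from the target:

     char *text = target_fileio_read_stralloc ("/proc/version");

     if (text != NULL)
       {
         ... use text ...
         xfree (text);
       }

   The file name above is only an example.  */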
3605
3606 char *
3607 target_fileio_read_stralloc (const char *filename)
3608 {
3609 gdb_byte *buffer;
3610 char *bufstr;
3611 LONGEST i, transferred;
3612
3613 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3614 bufstr = (char *) buffer;
3615
3616 if (transferred < 0)
3617 return NULL;
3618
3619 if (transferred == 0)
3620 return xstrdup ("");
3621
3622 bufstr[transferred] = 0;
3623
3624 /* Check for embedded NUL bytes; but allow trailing NULs. */
3625 for (i = strlen (bufstr); i < transferred; i++)
3626 if (bufstr[i] != 0)
3627 {
3628 warning (_("target file %s "
3629 "contained unexpected null characters"),
3630 filename);
3631 break;
3632 }
3633
3634 return bufstr;
3635 }
3636
3637
3638 static int
3639 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3640 {
3641 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3642 }
3643
3644 static int
3645 default_watchpoint_addr_within_range (struct target_ops *target,
3646 CORE_ADDR addr,
3647 CORE_ADDR start, int length)
3648 {
3649 return addr >= start && addr < start + length;
3650 }
3651
3652 static struct gdbarch *
3653 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3654 {
3655 return target_gdbarch ();
3656 }
3657
3658 static int
3659 return_zero (void)
3660 {
3661 return 0;
3662 }
3663
3664 static int
3665 return_one (void)
3666 {
3667 return 1;
3668 }
3669
3670 static int
3671 return_minus_one (void)
3672 {
3673 return -1;
3674 }
3675
3676 static void *
3677 return_null (void)
3678 {
3679 return 0;
3680 }
3681
3682 /*
3683 * Find the next target down the stack from the specified target.
3684 */
3685
3686 struct target_ops *
3687 find_target_beneath (struct target_ops *t)
3688 {
3689 return t->beneath;
3690 }
3691
3692 \f
3693 /* The inferior process has died. Long live the inferior! */
3694
3695 void
3696 generic_mourn_inferior (void)
3697 {
3698 ptid_t ptid;
3699
3700 ptid = inferior_ptid;
3701 inferior_ptid = null_ptid;
3702
3703 /* Mark breakpoints uninserted in case something tries to delete a
3704 breakpoint while we delete the inferior's threads (which would
3705 fail, since the inferior is long gone). */
3706 mark_breakpoints_out ();
3707
3708 if (!ptid_equal (ptid, null_ptid))
3709 {
3710 int pid = ptid_get_pid (ptid);
3711 exit_inferior (pid);
3712 }
3713
3714 /* Note this wipes step-resume breakpoints, so it needs to be done
3715 after exit_inferior, which ends up referencing the step-resume
3716 breakpoints through clear_thread_inferior_resources. */
3717 breakpoint_init_inferior (inf_exited);
3718
3719 registers_changed ();
3720
3721 reopen_exec_file ();
3722 reinit_frame_cache ();
3723
3724 if (deprecated_detach_hook)
3725 deprecated_detach_hook ();
3726 }
3727 \f
3728 /* Convert a normal process ID to a string. Returns the string in a
3729 static buffer. */
3730
3731 char *
3732 normal_pid_to_str (ptid_t ptid)
3733 {
3734 static char buf[32];
3735
3736 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3737 return buf;
3738 }
3739
3740 static char *
3741 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3742 {
3743 return normal_pid_to_str (ptid);
3744 }
3745
3746 /* Error-catcher for target_find_memory_regions. */
3747 static int
3748 dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
3749 {
3750 error (_("Command not implemented for this target."));
3751 return 0;
3752 }
3753
3754 /* Error-catcher for target_make_corefile_notes. */
3755 static char *
3756 dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
3757 {
3758 error (_("Command not implemented for this target."));
3759 return NULL;
3760 }
3761
3762 /* Error-catcher for target_get_bookmark. */
3763 static gdb_byte *
3764 dummy_get_bookmark (char *ignore1, int ignore2)
3765 {
3766 tcomplain ();
3767 return NULL;
3768 }
3769
3770 /* Error-catcher for target_goto_bookmark. */
3771 static void
3772 dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
3773 {
3774 tcomplain ();
3775 }
3776
3777 /* Set up the handful of non-empty slots needed by the dummy target
3778 vector. */
3779
3780 static void
3781 init_dummy_target (void)
3782 {
3783 dummy_target.to_shortname = "None";
3784 dummy_target.to_longname = "None";
3785 dummy_target.to_doc = "";
3786 dummy_target.to_attach = find_default_attach;
3787 dummy_target.to_detach =
3788 (void (*)(struct target_ops *, const char *, int))target_ignore;
3789 dummy_target.to_create_inferior = find_default_create_inferior;
3790 dummy_target.to_can_async_p = find_default_can_async_p;
3791 dummy_target.to_is_async_p = find_default_is_async_p;
3792 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3793 dummy_target.to_supports_disable_randomization
3794 = find_default_supports_disable_randomization;
3795 dummy_target.to_pid_to_str = dummy_pid_to_str;
3796 dummy_target.to_stratum = dummy_stratum;
3797 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
3798 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
3799 dummy_target.to_get_bookmark = dummy_get_bookmark;
3800 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
3801 dummy_target.to_xfer_partial = default_xfer_partial;
3802 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3803 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3804 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3805 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3806 dummy_target.to_has_execution
3807 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3808 dummy_target.to_stopped_by_watchpoint = return_zero;
3809 dummy_target.to_stopped_data_address =
3810 (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
3811 dummy_target.to_magic = OPS_MAGIC;
3812 }
3813 \f
3814 static void
3815 debug_to_open (char *args, int from_tty)
3816 {
3817 debug_target.to_open (args, from_tty);
3818
3819 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3820 }
3821
3822 void
3823 target_close (struct target_ops *targ)
3824 {
3825 gdb_assert (!target_is_pushed (targ));
3826
3827 if (targ->to_xclose != NULL)
3828 targ->to_xclose (targ);
3829 else if (targ->to_close != NULL)
3830 targ->to_close ();
3831
3832 if (targetdebug)
3833 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3834 }
3835
3836 void
3837 target_attach (char *args, int from_tty)
3838 {
3839 struct target_ops *t;
3840
3841 for (t = current_target.beneath; t != NULL; t = t->beneath)
3842 {
3843 if (t->to_attach != NULL)
3844 {
3845 t->to_attach (t, args, from_tty);
3846 if (targetdebug)
3847 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3848 args, from_tty);
3849 return;
3850 }
3851 }
3852
3853 internal_error (__FILE__, __LINE__,
3854 _("could not find a target to attach"));
3855 }
3856
3857 int
3858 target_thread_alive (ptid_t ptid)
3859 {
3860 struct target_ops *t;
3861
3862 for (t = current_target.beneath; t != NULL; t = t->beneath)
3863 {
3864 if (t->to_thread_alive != NULL)
3865 {
3866 int retval;
3867
3868 retval = t->to_thread_alive (t, ptid);
3869 if (targetdebug)
3870 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3871 ptid_get_pid (ptid), retval);
3872
3873 return retval;
3874 }
3875 }
3876
3877 return 0;
3878 }
3879
3880 void
3881 target_find_new_threads (void)
3882 {
3883 struct target_ops *t;
3884
3885 for (t = current_target.beneath; t != NULL; t = t->beneath)
3886 {
3887 if (t->to_find_new_threads != NULL)
3888 {
3889 t->to_find_new_threads (t);
3890 if (targetdebug)
3891 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3892
3893 return;
3894 }
3895 }
3896 }
3897
3898 void
3899 target_stop (ptid_t ptid)
3900 {
3901 if (!may_stop)
3902 {
3903 warning (_("May not interrupt or stop the target, ignoring attempt"));
3904 return;
3905 }
3906
3907 (*current_target.to_stop) (ptid);
3908 }
3909
3910 static void
3911 debug_to_post_attach (int pid)
3912 {
3913 debug_target.to_post_attach (pid);
3914
3915 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3916 }
3917
3918 /* Concatenate ELEM to LIST, a comma-separated list, and return the
3919 result. The incoming LIST argument is released. */
3920
3921 static char *
3922 str_comma_list_concat_elem (char *list, const char *elem)
3923 {
3924 if (list == NULL)
3925 return xstrdup (elem);
3926 else
3927 return reconcat (list, list, ", ", elem, (char *) NULL);
3928 }
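/* An illustrative sketch of how the helper above might be used;
   hypothetical code, not taken from this file.  Each call may release
   and reallocate LIST, so only the returned pointer remains valid and
   the caller eventually frees it with xfree:

     char *list = NULL;

     list = str_comma_list_concat_elem (list, "foo");    => "foo"
     list = str_comma_list_concat_elem (list, "bar");    => "foo, bar"
     xfree (list);  */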
3929
3930 /* Helper for target_options_to_string. If OPT is present in
3931 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3932 Returns the new resulting string. OPT is removed from
3933 TARGET_OPTIONS. */
3934
3935 static char *
3936 do_option (int *target_options, char *ret,
3937 int opt, char *opt_str)
3938 {
3939 if ((*target_options & opt) != 0)
3940 {
3941 ret = str_comma_list_concat_elem (ret, opt_str);
3942 *target_options &= ~opt;
3943 }
3944
3945 return ret;
3946 }
3947
3948 char *
3949 target_options_to_string (int target_options)
3950 {
3951 char *ret = NULL;
3952
3953 #define DO_TARG_OPTION(OPT) \
3954 ret = do_option (&target_options, ret, OPT, #OPT)
3955
3956 DO_TARG_OPTION (TARGET_WNOHANG);
3957
3958 if (target_options != 0)
3959 ret = str_comma_list_concat_elem (ret, "unknown???");
3960
3961 if (ret == NULL)
3962 ret = xstrdup ("");
3963 return ret;
3964 }
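/* Hypothetical examples of the strings produced by
   target_options_to_string, assuming TARGET_WNOHANG is the only flag
   handled by the DO_TARG_OPTION list above and 0x80 is not a defined
   option bit:

     target_options_to_string (TARGET_WNOHANG)   => "TARGET_WNOHANG"
     target_options_to_string (0)                => ""
     target_options_to_string (0x80)             => "unknown???"

   The returned string is heap-allocated; callers free it with xfree.  */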
3965
3966 static void
3967 debug_print_register (const char * func,
3968 struct regcache *regcache, int regno)
3969 {
3970 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3971
3972 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3973 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3974 && gdbarch_register_name (gdbarch, regno) != NULL
3975 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3976 fprintf_unfiltered (gdb_stdlog, "(%s)",
3977 gdbarch_register_name (gdbarch, regno));
3978 else
3979 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3980 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3981 {
3982 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3983 int i, size = register_size (gdbarch, regno);
3984 gdb_byte buf[MAX_REGISTER_SIZE];
3985
3986 regcache_raw_collect (regcache, regno, buf);
3987 fprintf_unfiltered (gdb_stdlog, " = ");
3988 for (i = 0; i < size; i++)
3989 {
3990 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3991 }
3992 if (size <= sizeof (LONGEST))
3993 {
3994 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3995
3996 fprintf_unfiltered (gdb_stdlog, " %s %s",
3997 core_addr_to_string_nz (val), plongest (val));
3998 }
3999 }
4000 fprintf_unfiltered (gdb_stdlog, "\n");
4001 }
4002
4003 void
4004 target_fetch_registers (struct regcache *regcache, int regno)
4005 {
4006 struct target_ops *t;
4007
4008 for (t = current_target.beneath; t != NULL; t = t->beneath)
4009 {
4010 if (t->to_fetch_registers != NULL)
4011 {
4012 t->to_fetch_registers (t, regcache, regno);
4013 if (targetdebug)
4014 debug_print_register ("target_fetch_registers", regcache, regno);
4015 return;
4016 }
4017 }
4018 }
4019
4020 void
4021 target_store_registers (struct regcache *regcache, int regno)
4022 {
4023 struct target_ops *t;
4024
4025 if (!may_write_registers)
4026 error (_("Writing to registers is not allowed (regno %d)"), regno);
4027
4028 for (t = current_target.beneath; t != NULL; t = t->beneath)
4029 {
4030 if (t->to_store_registers != NULL)
4031 {
4032 t->to_store_registers (t, regcache, regno);
4033 if (targetdebug)
4034 {
4035 debug_print_register ("target_store_registers", regcache, regno);
4036 }
4037 return;
4038 }
4039 }
4040
4041 noprocess ();
4042 }
4043
4044 int
4045 target_core_of_thread (ptid_t ptid)
4046 {
4047 struct target_ops *t;
4048
4049 for (t = current_target.beneath; t != NULL; t = t->beneath)
4050 {
4051 if (t->to_core_of_thread != NULL)
4052 {
4053 int retval = t->to_core_of_thread (t, ptid);
4054
4055 if (targetdebug)
4056 fprintf_unfiltered (gdb_stdlog,
4057 "target_core_of_thread (%d) = %d\n",
4058 ptid_get_pid (ptid), retval);
4059 return retval;
4060 }
4061 }
4062
4063 return -1;
4064 }
4065
4066 int
4067 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4068 {
4069 struct target_ops *t;
4070
4071 for (t = current_target.beneath; t != NULL; t = t->beneath)
4072 {
4073 if (t->to_verify_memory != NULL)
4074 {
4075 int retval = t->to_verify_memory (t, data, memaddr, size);
4076
4077 if (targetdebug)
4078 fprintf_unfiltered (gdb_stdlog,
4079 "target_verify_memory (%s, %s) = %d\n",
4080 paddress (target_gdbarch (), memaddr),
4081 pulongest (size),
4082 retval);
4083 return retval;
4084 }
4085 }
4086
4087 tcomplain ();
4088 }
4089
4090 /* The documentation for this function is in its prototype declaration in
4091 target.h. */
4092
4093 int
4094 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4095 {
4096 struct target_ops *t;
4097
4098 for (t = current_target.beneath; t != NULL; t = t->beneath)
4099 if (t->to_insert_mask_watchpoint != NULL)
4100 {
4101 int ret;
4102
4103 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4104
4105 if (targetdebug)
4106 fprintf_unfiltered (gdb_stdlog, "\
4107 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4108 core_addr_to_string (addr),
4109 core_addr_to_string (mask), rw, ret);
4110
4111 return ret;
4112 }
4113
4114 return 1;
4115 }
4116
4117 /* The documentation for this function is in its prototype declaration in
4118 target.h. */
4119
4120 int
4121 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4122 {
4123 struct target_ops *t;
4124
4125 for (t = current_target.beneath; t != NULL; t = t->beneath)
4126 if (t->to_remove_mask_watchpoint != NULL)
4127 {
4128 int ret;
4129
4130 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4131
4132 if (targetdebug)
4133 fprintf_unfiltered (gdb_stdlog, "\
4134 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4135 core_addr_to_string (addr),
4136 core_addr_to_string (mask), rw, ret);
4137
4138 return ret;
4139 }
4140
4141 return 1;
4142 }
4143
4144 /* The documentation for this function is in its prototype declaration
4145 in target.h. */
4146
4147 int
4148 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4149 {
4150 struct target_ops *t;
4151
4152 for (t = current_target.beneath; t != NULL; t = t->beneath)
4153 if (t->to_masked_watch_num_registers != NULL)
4154 return t->to_masked_watch_num_registers (t, addr, mask);
4155
4156 return -1;
4157 }
4158
4159 /* The documentation for this function is in its prototype declaration
4160 in target.h. */
4161
4162 int
4163 target_ranged_break_num_registers (void)
4164 {
4165 struct target_ops *t;
4166
4167 for (t = current_target.beneath; t != NULL; t = t->beneath)
4168 if (t->to_ranged_break_num_registers != NULL)
4169 return t->to_ranged_break_num_registers (t);
4170
4171 return -1;
4172 }
4173
4174 /* See target.h. */
4175
4176 int
4177 target_supports_btrace (void)
4178 {
4179 struct target_ops *t;
4180
4181 for (t = current_target.beneath; t != NULL; t = t->beneath)
4182 if (t->to_supports_btrace != NULL)
4183 return t->to_supports_btrace ();
4184
4185 return 0;
4186 }
4187
4188 /* See target.h. */
4189
4190 struct btrace_target_info *
4191 target_enable_btrace (ptid_t ptid)
4192 {
4193 struct target_ops *t;
4194
4195 for (t = current_target.beneath; t != NULL; t = t->beneath)
4196 if (t->to_enable_btrace != NULL)
4197 return t->to_enable_btrace (ptid);
4198
4199 tcomplain ();
4200 return NULL;
4201 }
4202
4203 /* See target.h. */
4204
4205 void
4206 target_disable_btrace (struct btrace_target_info *btinfo)
4207 {
4208 struct target_ops *t;
4209
4210 for (t = current_target.beneath; t != NULL; t = t->beneath)
4211 if (t->to_disable_btrace != NULL)
4212 {
4213 t->to_disable_btrace (btinfo);
4214 return;
4215 }
4216
4217 tcomplain ();
4218 }
4219
4220 /* See target.h. */
4221
4222 void
4223 target_teardown_btrace (struct btrace_target_info *btinfo)
4224 {
4225 struct target_ops *t;
4226
4227 for (t = current_target.beneath; t != NULL; t = t->beneath)
4228 if (t->to_teardown_btrace != NULL)
4229 {
4230 t->to_teardown_btrace (btinfo);
4231 return;
4232 }
4233
4234 tcomplain ();
4235 }
4236
4237 /* See target.h. */
4238
4239 enum btrace_error
4240 target_read_btrace (VEC (btrace_block_s) **btrace,
4241 struct btrace_target_info *btinfo,
4242 enum btrace_read_type type)
4243 {
4244 struct target_ops *t;
4245
4246 for (t = current_target.beneath; t != NULL; t = t->beneath)
4247 if (t->to_read_btrace != NULL)
4248 return t->to_read_btrace (btrace, btinfo, type);
4249
4250 tcomplain ();
4251 return BTRACE_ERR_NOT_SUPPORTED;
4252 }
4253
4254 /* See target.h. */
4255
4256 void
4257 target_stop_recording (void)
4258 {
4259 struct target_ops *t;
4260
4261 for (t = current_target.beneath; t != NULL; t = t->beneath)
4262 if (t->to_stop_recording != NULL)
4263 {
4264 t->to_stop_recording ();
4265 return;
4266 }
4267
4268 /* This is optional. */
4269 }
4270
4271 /* See target.h. */
4272
4273 void
4274 target_info_record (void)
4275 {
4276 struct target_ops *t;
4277
4278 for (t = current_target.beneath; t != NULL; t = t->beneath)
4279 if (t->to_info_record != NULL)
4280 {
4281 t->to_info_record ();
4282 return;
4283 }
4284
4285 tcomplain ();
4286 }
4287
4288 /* See target.h. */
4289
4290 void
4291 target_save_record (const char *filename)
4292 {
4293 struct target_ops *t;
4294
4295 for (t = current_target.beneath; t != NULL; t = t->beneath)
4296 if (t->to_save_record != NULL)
4297 {
4298 t->to_save_record (filename);
4299 return;
4300 }
4301
4302 tcomplain ();
4303 }
4304
4305 /* See target.h. */
4306
4307 int
4308 target_supports_delete_record (void)
4309 {
4310 struct target_ops *t;
4311
4312 for (t = current_target.beneath; t != NULL; t = t->beneath)
4313 if (t->to_delete_record != NULL)
4314 return 1;
4315
4316 return 0;
4317 }
4318
4319 /* See target.h. */
4320
4321 void
4322 target_delete_record (void)
4323 {
4324 struct target_ops *t;
4325
4326 for (t = current_target.beneath; t != NULL; t = t->beneath)
4327 if (t->to_delete_record != NULL)
4328 {
4329 t->to_delete_record ();
4330 return;
4331 }
4332
4333 tcomplain ();
4334 }
4335
4336 /* See target.h. */
4337
4338 int
4339 target_record_is_replaying (void)
4340 {
4341 struct target_ops *t;
4342
4343 for (t = current_target.beneath; t != NULL; t = t->beneath)
4344 if (t->to_record_is_replaying != NULL)
4345 return t->to_record_is_replaying ();
4346
4347 return 0;
4348 }
4349
4350 /* See target.h. */
4351
4352 void
4353 target_goto_record_begin (void)
4354 {
4355 struct target_ops *t;
4356
4357 for (t = current_target.beneath; t != NULL; t = t->beneath)
4358 if (t->to_goto_record_begin != NULL)
4359 {
4360 t->to_goto_record_begin ();
4361 return;
4362 }
4363
4364 tcomplain ();
4365 }
4366
4367 /* See target.h. */
4368
4369 void
4370 target_goto_record_end (void)
4371 {
4372 struct target_ops *t;
4373
4374 for (t = current_target.beneath; t != NULL; t = t->beneath)
4375 if (t->to_goto_record_end != NULL)
4376 {
4377 t->to_goto_record_end ();
4378 return;
4379 }
4380
4381 tcomplain ();
4382 }
4383
4384 /* See target.h. */
4385
4386 void
4387 target_goto_record (ULONGEST insn)
4388 {
4389 struct target_ops *t;
4390
4391 for (t = current_target.beneath; t != NULL; t = t->beneath)
4392 if (t->to_goto_record != NULL)
4393 {
4394 t->to_goto_record (insn);
4395 return;
4396 }
4397
4398 tcomplain ();
4399 }
4400
4401 /* See target.h. */
4402
4403 void
4404 target_insn_history (int size, int flags)
4405 {
4406 struct target_ops *t;
4407
4408 for (t = current_target.beneath; t != NULL; t = t->beneath)
4409 if (t->to_insn_history != NULL)
4410 {
4411 t->to_insn_history (size, flags);
4412 return;
4413 }
4414
4415 tcomplain ();
4416 }
4417
4418 /* See target.h. */
4419
4420 void
4421 target_insn_history_from (ULONGEST from, int size, int flags)
4422 {
4423 struct target_ops *t;
4424
4425 for (t = current_target.beneath; t != NULL; t = t->beneath)
4426 if (t->to_insn_history_from != NULL)
4427 {
4428 t->to_insn_history_from (from, size, flags);
4429 return;
4430 }
4431
4432 tcomplain ();
4433 }
4434
4435 /* See target.h. */
4436
4437 void
4438 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4439 {
4440 struct target_ops *t;
4441
4442 for (t = current_target.beneath; t != NULL; t = t->beneath)
4443 if (t->to_insn_history_range != NULL)
4444 {
4445 t->to_insn_history_range (begin, end, flags);
4446 return;
4447 }
4448
4449 tcomplain ();
4450 }
4451
4452 /* See target.h. */
4453
4454 void
4455 target_call_history (int size, int flags)
4456 {
4457 struct target_ops *t;
4458
4459 for (t = current_target.beneath; t != NULL; t = t->beneath)
4460 if (t->to_call_history != NULL)
4461 {
4462 t->to_call_history (size, flags);
4463 return;
4464 }
4465
4466 tcomplain ();
4467 }
4468
4469 /* See target.h. */
4470
4471 void
4472 target_call_history_from (ULONGEST begin, int size, int flags)
4473 {
4474 struct target_ops *t;
4475
4476 for (t = current_target.beneath; t != NULL; t = t->beneath)
4477 if (t->to_call_history_from != NULL)
4478 {
4479 t->to_call_history_from (begin, size, flags);
4480 return;
4481 }
4482
4483 tcomplain ();
4484 }
4485
4486 /* See target.h. */
4487
4488 void
4489 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4490 {
4491 struct target_ops *t;
4492
4493 for (t = current_target.beneath; t != NULL; t = t->beneath)
4494 if (t->to_call_history_range != NULL)
4495 {
4496 t->to_call_history_range (begin, end, flags);
4497 return;
4498 }
4499
4500 tcomplain ();
4501 }
4502
4503 static void
4504 debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
4505 {
4506 debug_target.to_prepare_to_store (&debug_target, regcache);
4507
4508 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4509 }
4510
4511 /* See target.h. */
4512
4513 const struct frame_unwind *
4514 target_get_unwinder (void)
4515 {
4516 struct target_ops *t;
4517
4518 for (t = current_target.beneath; t != NULL; t = t->beneath)
4519 if (t->to_get_unwinder != NULL)
4520 return t->to_get_unwinder;
4521
4522 return NULL;
4523 }
4524
4525 /* See target.h. */
4526
4527 const struct frame_unwind *
4528 target_get_tailcall_unwinder (void)
4529 {
4530 struct target_ops *t;
4531
4532 for (t = current_target.beneath; t != NULL; t = t->beneath)
4533 if (t->to_get_tailcall_unwinder != NULL)
4534 return t->to_get_tailcall_unwinder;
4535
4536 return NULL;
4537 }
4538
4539 /* See target.h. */
4540
4541 CORE_ADDR
4542 forward_target_decr_pc_after_break (struct target_ops *ops,
4543 struct gdbarch *gdbarch)
4544 {
4545 for (; ops != NULL; ops = ops->beneath)
4546 if (ops->to_decr_pc_after_break != NULL)
4547 return ops->to_decr_pc_after_break (ops, gdbarch);
4548
4549 return gdbarch_decr_pc_after_break (gdbarch);
4550 }
4551
4552 /* See target.h. */
4553
4554 CORE_ADDR
4555 target_decr_pc_after_break (struct gdbarch *gdbarch)
4556 {
4557 return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
4558 }
4559
4560 static int
4561 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4562 int write, struct mem_attrib *attrib,
4563 struct target_ops *target)
4564 {
4565 int retval;
4566
4567 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4568 attrib, target);
4569
4570 fprintf_unfiltered (gdb_stdlog,
4571 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4572 paddress (target_gdbarch (), memaddr), len,
4573 write ? "write" : "read", retval);
4574
4575 if (retval > 0)
4576 {
4577 int i;
4578
4579 fputs_unfiltered (", bytes =", gdb_stdlog);
4580 for (i = 0; i < retval; i++)
4581 {
4582 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4583 {
4584 if (targetdebug < 2 && i > 0)
4585 {
4586 fprintf_unfiltered (gdb_stdlog, " ...");
4587 break;
4588 }
4589 fprintf_unfiltered (gdb_stdlog, "\n");
4590 }
4591
4592 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4593 }
4594 }
4595
4596 fputc_unfiltered ('\n', gdb_stdlog);
4597
4598 return retval;
4599 }
4600
4601 static void
4602 debug_to_files_info (struct target_ops *target)
4603 {
4604 debug_target.to_files_info (target);
4605
4606 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4607 }
4608
4609 static int
4610 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4611 struct bp_target_info *bp_tgt)
4612 {
4613 int retval;
4614
4615 retval = forward_target_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4616
4617 fprintf_unfiltered (gdb_stdlog,
4618 "target_insert_breakpoint (%s, xxx) = %ld\n",
4619 core_addr_to_string (bp_tgt->placed_address),
4620 (unsigned long) retval);
4621 return retval;
4622 }
4623
4624 static int
4625 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4626 struct bp_target_info *bp_tgt)
4627 {
4628 int retval;
4629
4630 retval = forward_target_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4631
4632 fprintf_unfiltered (gdb_stdlog,
4633 "target_remove_breakpoint (%s, xxx) = %ld\n",
4634 core_addr_to_string (bp_tgt->placed_address),
4635 (unsigned long) retval);
4636 return retval;
4637 }
4638
4639 static int
4640 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
4641 {
4642 int retval;
4643
4644 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
4645
4646 fprintf_unfiltered (gdb_stdlog,
4647 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4648 (unsigned long) type,
4649 (unsigned long) cnt,
4650 (unsigned long) from_tty,
4651 (unsigned long) retval);
4652 return retval;
4653 }
4654
4655 static int
4656 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
4657 {
4658 CORE_ADDR retval;
4659
4660 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
4661
4662 fprintf_unfiltered (gdb_stdlog,
4663 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4664 core_addr_to_string (addr), (unsigned long) len,
4665 core_addr_to_string (retval));
4666 return retval;
4667 }
4668
4669 static int
4670 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
4671 struct expression *cond)
4672 {
4673 int retval;
4674
4675 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
4676 rw, cond);
4677
4678 fprintf_unfiltered (gdb_stdlog,
4679 "target_can_accel_watchpoint_condition "
4680 "(%s, %d, %d, %s) = %ld\n",
4681 core_addr_to_string (addr), len, rw,
4682 host_address_to_string (cond), (unsigned long) retval);
4683 return retval;
4684 }
4685
4686 static int
4687 debug_to_stopped_by_watchpoint (void)
4688 {
4689 int retval;
4690
4691 retval = debug_target.to_stopped_by_watchpoint ();
4692
4693 fprintf_unfiltered (gdb_stdlog,
4694 "target_stopped_by_watchpoint () = %ld\n",
4695 (unsigned long) retval);
4696 return retval;
4697 }
4698
4699 static int
4700 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4701 {
4702 int retval;
4703
4704 retval = debug_target.to_stopped_data_address (target, addr);
4705
4706 fprintf_unfiltered (gdb_stdlog,
4707 "target_stopped_data_address ([%s]) = %ld\n",
4708 core_addr_to_string (*addr),
4709 (unsigned long)retval);
4710 return retval;
4711 }
4712
4713 static int
4714 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4715 CORE_ADDR addr,
4716 CORE_ADDR start, int length)
4717 {
4718 int retval;
4719
4720 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4721 start, length);
4722
4723 fprintf_filtered (gdb_stdlog,
4724 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4725 core_addr_to_string (addr), core_addr_to_string (start),
4726 length, retval);
4727 return retval;
4728 }
4729
4730 static int
4731 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
4732 struct bp_target_info *bp_tgt)
4733 {
4734 int retval;
4735
4736 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
4737
4738 fprintf_unfiltered (gdb_stdlog,
4739 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4740 core_addr_to_string (bp_tgt->placed_address),
4741 (unsigned long) retval);
4742 return retval;
4743 }
4744
4745 static int
4746 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
4747 struct bp_target_info *bp_tgt)
4748 {
4749 int retval;
4750
4751 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
4752
4753 fprintf_unfiltered (gdb_stdlog,
4754 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4755 core_addr_to_string (bp_tgt->placed_address),
4756 (unsigned long) retval);
4757 return retval;
4758 }
4759
4760 static int
4761 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
4762 struct expression *cond)
4763 {
4764 int retval;
4765
4766 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
4767
4768 fprintf_unfiltered (gdb_stdlog,
4769 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4770 core_addr_to_string (addr), len, type,
4771 host_address_to_string (cond), (unsigned long) retval);
4772 return retval;
4773 }
4774
4775 static int
4776 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
4777 struct expression *cond)
4778 {
4779 int retval;
4780
4781 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
4782
4783 fprintf_unfiltered (gdb_stdlog,
4784 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4785 core_addr_to_string (addr), len, type,
4786 host_address_to_string (cond), (unsigned long) retval);
4787 return retval;
4788 }
4789
4790 static void
4791 debug_to_terminal_init (void)
4792 {
4793 debug_target.to_terminal_init ();
4794
4795 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4796 }
4797
4798 static void
4799 debug_to_terminal_inferior (void)
4800 {
4801 debug_target.to_terminal_inferior ();
4802
4803 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4804 }
4805
4806 static void
4807 debug_to_terminal_ours_for_output (void)
4808 {
4809 debug_target.to_terminal_ours_for_output ();
4810
4811 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4812 }
4813
4814 static void
4815 debug_to_terminal_ours (void)
4816 {
4817 debug_target.to_terminal_ours ();
4818
4819 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4820 }
4821
4822 static void
4823 debug_to_terminal_save_ours (void)
4824 {
4825 debug_target.to_terminal_save_ours ();
4826
4827 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4828 }
4829
4830 static void
4831 debug_to_terminal_info (const char *arg, int from_tty)
4832 {
4833 debug_target.to_terminal_info (arg, from_tty);
4834
4835 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4836 from_tty);
4837 }
4838
4839 static void
4840 debug_to_load (char *args, int from_tty)
4841 {
4842 debug_target.to_load (args, from_tty);
4843
4844 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4845 }
4846
4847 static void
4848 debug_to_post_startup_inferior (ptid_t ptid)
4849 {
4850 debug_target.to_post_startup_inferior (ptid);
4851
4852 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4853 ptid_get_pid (ptid));
4854 }
4855
4856 static int
4857 debug_to_insert_fork_catchpoint (int pid)
4858 {
4859 int retval;
4860
4861 retval = debug_target.to_insert_fork_catchpoint (pid);
4862
4863 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4864 pid, retval);
4865
4866 return retval;
4867 }
4868
4869 static int
4870 debug_to_remove_fork_catchpoint (int pid)
4871 {
4872 int retval;
4873
4874 retval = debug_target.to_remove_fork_catchpoint (pid);
4875
4876 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4877 pid, retval);
4878
4879 return retval;
4880 }
4881
4882 static int
4883 debug_to_insert_vfork_catchpoint (int pid)
4884 {
4885 int retval;
4886
4887 retval = debug_target.to_insert_vfork_catchpoint (pid);
4888
4889 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4890 pid, retval);
4891
4892 return retval;
4893 }
4894
4895 static int
4896 debug_to_remove_vfork_catchpoint (int pid)
4897 {
4898 int retval;
4899
4900 retval = debug_target.to_remove_vfork_catchpoint (pid);
4901
4902 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4903 pid, retval);
4904
4905 return retval;
4906 }
4907
4908 static int
4909 debug_to_insert_exec_catchpoint (int pid)
4910 {
4911 int retval;
4912
4913 retval = debug_target.to_insert_exec_catchpoint (pid);
4914
4915 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4916 pid, retval);
4917
4918 return retval;
4919 }
4920
4921 static int
4922 debug_to_remove_exec_catchpoint (int pid)
4923 {
4924 int retval;
4925
4926 retval = debug_target.to_remove_exec_catchpoint (pid);
4927
4928 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4929 pid, retval);
4930
4931 return retval;
4932 }
4933
4934 static int
4935 debug_to_has_exited (int pid, int wait_status, int *exit_status)
4936 {
4937 int has_exited;
4938
4939 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
4940
4941 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4942 pid, wait_status, *exit_status, has_exited);
4943
4944 return has_exited;
4945 }
4946
4947 static int
4948 debug_to_can_run (void)
4949 {
4950 int retval;
4951
4952 retval = debug_target.to_can_run ();
4953
4954 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4955
4956 return retval;
4957 }
4958
4959 static struct gdbarch *
4960 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4961 {
4962 struct gdbarch *retval;
4963
4964 retval = debug_target.to_thread_architecture (ops, ptid);
4965
4966 fprintf_unfiltered (gdb_stdlog,
4967 "target_thread_architecture (%s) = %s [%s]\n",
4968 target_pid_to_str (ptid),
4969 host_address_to_string (retval),
4970 gdbarch_bfd_arch_info (retval)->printable_name);
4971 return retval;
4972 }
4973
4974 static void
4975 debug_to_stop (ptid_t ptid)
4976 {
4977 debug_target.to_stop (ptid);
4978
4979 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4980 target_pid_to_str (ptid));
4981 }
4982
4983 static void
4984 debug_to_rcmd (char *command,
4985 struct ui_file *outbuf)
4986 {
4987 debug_target.to_rcmd (command, outbuf);
4988 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4989 }
4990
4991 static char *
4992 debug_to_pid_to_exec_file (int pid)
4993 {
4994 char *exec_file;
4995
4996 exec_file = debug_target.to_pid_to_exec_file (pid);
4997
4998 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4999 pid, exec_file);
5000
5001 return exec_file;
5002 }
5003
5004 static void
5005 setup_target_debug (void)
5006 {
5007 memcpy (&debug_target, &current_target, sizeof debug_target);
5008
5009 current_target.to_open = debug_to_open;
5010 current_target.to_post_attach = debug_to_post_attach;
5011 current_target.to_prepare_to_store = debug_to_prepare_to_store;
5012 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
5013 current_target.to_files_info = debug_to_files_info;
5014 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
5015 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
5016 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
5017 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
5018 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
5019 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
5020 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
5021 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
5022 current_target.to_stopped_data_address = debug_to_stopped_data_address;
5023 current_target.to_watchpoint_addr_within_range
5024 = debug_to_watchpoint_addr_within_range;
5025 current_target.to_region_ok_for_hw_watchpoint
5026 = debug_to_region_ok_for_hw_watchpoint;
5027 current_target.to_can_accel_watchpoint_condition
5028 = debug_to_can_accel_watchpoint_condition;
5029 current_target.to_terminal_init = debug_to_terminal_init;
5030 current_target.to_terminal_inferior = debug_to_terminal_inferior;
5031 current_target.to_terminal_ours_for_output
5032 = debug_to_terminal_ours_for_output;
5033 current_target.to_terminal_ours = debug_to_terminal_ours;
5034 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
5035 current_target.to_terminal_info = debug_to_terminal_info;
5036 current_target.to_load = debug_to_load;
5037 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
5038 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
5039 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
5040 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
5041 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
5042 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
5043 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
5044 current_target.to_has_exited = debug_to_has_exited;
5045 current_target.to_can_run = debug_to_can_run;
5046 current_target.to_stop = debug_to_stop;
5047 current_target.to_rcmd = debug_to_rcmd;
5048 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
5049 current_target.to_thread_architecture = debug_to_thread_architecture;
5050 }
5051 \f
5052
5053 static char targ_desc[] =
5054 "Names of targets and files being debugged.\nShows the entire \
5055 stack of targets currently in use (including the exec-file,\n\
5056 core-file, and process, if any), as well as the symbol file name.";
5057
5058 static void
5059 do_monitor_command (char *cmd,
5060 int from_tty)
5061 {
5062 if ((current_target.to_rcmd
5063 == (void (*) (char *, struct ui_file *)) tcomplain)
5064 || (current_target.to_rcmd == debug_to_rcmd
5065 && (debug_target.to_rcmd
5066 == (void (*) (char *, struct ui_file *)) tcomplain)))
5067 error (_("\"monitor\" command not supported by this target."));
5068 target_rcmd (cmd, gdb_stdtarg);
5069 }
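/* Illustrative use of the command above (hypothetical session): with a
   target that implements to_rcmd, for instance a remote stub,

     (gdb) monitor help

   forwards the string "help" to the target via target_rcmd and prints
   the target's reply on gdb_stdtarg.  */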
5070
5071 /* Print the name of each layer of our target stack. */
5072
5073 static void
5074 maintenance_print_target_stack (char *cmd, int from_tty)
5075 {
5076 struct target_ops *t;
5077
5078 printf_filtered (_("The current target stack is:\n"));
5079
5080 for (t = target_stack; t != NULL; t = t->beneath)
5081 {
5082 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
5083 }
5084 }
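/* Hypothetical example of the output (the target names shown are
   illustrative):

     (gdb) maintenance print target-stack
     The current target stack is:
      - exec (Local exec file)
      - None (None)  */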
5085
5086 /* Controls whether async mode is permitted. */
5087 int target_async_permitted = 0;
5088
5089 /* The set command writes to this variable. If the inferior is
5090 executing, target_async_permitted is *not* updated. */
5091 static int target_async_permitted_1 = 0;
5092
5093 static void
5094 set_target_async_command (char *args, int from_tty,
5095 struct cmd_list_element *c)
5096 {
5097 if (have_live_inferiors ())
5098 {
5099 target_async_permitted_1 = target_async_permitted;
5100 error (_("Cannot change this setting while the inferior is running."));
5101 }
5102
5103 target_async_permitted = target_async_permitted_1;
5104 }
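/* A hypothetical session illustrating the staging scheme described
   above (not taken from the original sources):

     (gdb) set target-async on      -- no live inferior: value committed
     (gdb) run &
     (gdb) set target-async off     -- live inferior: rejected with
                                       "Cannot change this setting while
                                       the inferior is running.", and
                                       target_async_permitted_1 is reset
                                       so "show target-async" keeps
                                       reporting the active value.  */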
5105
5106 static void
5107 show_target_async_command (struct ui_file *file, int from_tty,
5108 struct cmd_list_element *c,
5109 const char *value)
5110 {
5111 fprintf_filtered (file,
5112 _("Controlling the inferior in "
5113 "asynchronous mode is %s.\n"), value);
5114 }
5115
5116 /* Temporary copies of permission settings. */
5117
5118 static int may_write_registers_1 = 1;
5119 static int may_write_memory_1 = 1;
5120 static int may_insert_breakpoints_1 = 1;
5121 static int may_insert_tracepoints_1 = 1;
5122 static int may_insert_fast_tracepoints_1 = 1;
5123 static int may_stop_1 = 1;
5124
5125 /* Make the user-set values match the real values again. */
5126
5127 void
5128 update_target_permissions (void)
5129 {
5130 may_write_registers_1 = may_write_registers;
5131 may_write_memory_1 = may_write_memory;
5132 may_insert_breakpoints_1 = may_insert_breakpoints;
5133 may_insert_tracepoints_1 = may_insert_tracepoints;
5134 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
5135 may_stop_1 = may_stop;
5136 }
5137
5138 /* This one function handles (most of) the permission flags in the same
5139 way. */
5140
5141 static void
5142 set_target_permissions (char *args, int from_tty,
5143 struct cmd_list_element *c)
5144 {
5145 if (target_has_execution)
5146 {
5147 update_target_permissions ();
5148 error (_("Cannot change this setting while the inferior is running."));
5149 }
5150
5151 /* Make the real values match the user-changed values. */
5152 may_write_registers = may_write_registers_1;
5153 may_insert_breakpoints = may_insert_breakpoints_1;
5154 may_insert_tracepoints = may_insert_tracepoints_1;
5155 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
5156 may_stop = may_stop_1;
5157 update_observer_mode ();
5158 }
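/* A hypothetical session illustrating the permission staging above: the
   "set may-*" commands write the *_1 copies, and this hook either
   commits them or, when target_has_execution is true, calls
   update_target_permissions to roll the copies back before raising the
   error.

     (gdb) set may-insert-breakpoints off   -- no live process: committed
     (gdb) run
     (gdb) set may-write-registers off      -- live process: rejected and
                                               may_write_registers_1 is
                                               restored from
                                               may_write_registers.  */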
5159
5160 /* Set memory write permission independently of observer mode. */
5161
5162 static void
5163 set_write_memory_permission (char *args, int from_tty,
5164 struct cmd_list_element *c)
5165 {
5166 /* Make the real values match the user-changed values. */
5167 may_write_memory = may_write_memory_1;
5168 update_observer_mode ();
5169 }
5170
5171
5172 void
5173 initialize_targets (void)
5174 {
5175 init_dummy_target ();
5176 push_target (&dummy_target);
5177
5178 add_info ("target", target_info, targ_desc);
5179 add_info ("files", target_info, targ_desc);
5180
5181 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
5182 Set target debugging."), _("\
5183 Show target debugging."), _("\
5184 When non-zero, target debugging is enabled. Higher numbers are more\n\
5185 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5186 command."),
5187 NULL,
5188 show_targetdebug,
5189 &setdebuglist, &showdebuglist);
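/* Hypothetical usage of the knob defined above:

     (gdb) set debug target 1   -- log one line per target method call
     (gdb) set debug target 2   -- additionally dump complete memory
                                   transfer contents (see
                                   deprecated_debug_xfer_memory).

   The new value only takes effect at the next "run" or "target"
   command, as noted in the help text.  */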
5190
5191 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
5192 &trust_readonly, _("\
5193 Set mode for reading from readonly sections."), _("\
5194 Show mode for reading from readonly sections."), _("\
5195 When this mode is on, memory reads from readonly sections (such as .text)\n\
5196 will be read from the object file instead of from the target. This will\n\
5197 result in significant performance improvement for remote targets."),
5198 NULL,
5199 show_trust_readonly,
5200 &setlist, &showlist);
5201
5202 add_com ("monitor", class_obscure, do_monitor_command,
5203 _("Send a command to the remote monitor (remote targets only)."));
5204
5205 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
5206 _("Print the name of each layer of the internal target stack."),
5207 &maintenanceprintlist);
5208
5209 add_setshow_boolean_cmd ("target-async", no_class,
5210 &target_async_permitted_1, _("\
5211 Set whether gdb controls the inferior in asynchronous mode."), _("\
5212 Show whether gdb controls the inferior in asynchronous mode."), _("\
5213 Tells gdb whether to control the inferior in asynchronous mode."),
5214 set_target_async_command,
5215 show_target_async_command,
5216 &setlist,
5217 &showlist);
5218
5219 add_setshow_boolean_cmd ("may-write-registers", class_support,
5220 &may_write_registers_1, _("\
5221 Set permission to write into registers."), _("\
5222 Show permission to write into registers."), _("\
5223 When this permission is on, GDB may write into the target's registers.\n\
5224 Otherwise, any sort of write attempt will result in an error."),
5225 set_target_permissions, NULL,
5226 &setlist, &showlist);
5227
5228 add_setshow_boolean_cmd ("may-write-memory", class_support,
5229 &may_write_memory_1, _("\
5230 Set permission to write into target memory."), _("\
5231 Show permission to write into target memory."), _("\
5232 When this permission is on, GDB may write into the target's memory.\n\
5233 Otherwise, any sort of write attempt will result in an error."),
5234 set_write_memory_permission, NULL,
5235 &setlist, &showlist);
5236
5237 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
5238 &may_insert_breakpoints_1, _("\
5239 Set permission to insert breakpoints in the target."), _("\
5240 Show permission to insert breakpoints in the target."), _("\
5241 When this permission is on, GDB may insert breakpoints in the program.\n\
5242 Otherwise, any sort of insertion attempt will result in an error."),
5243 set_target_permissions, NULL,
5244 &setlist, &showlist);
5245
5246 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
5247 &may_insert_tracepoints_1, _("\
5248 Set permission to insert tracepoints in the target."), _("\
5249 Show permission to insert tracepoints in the target."), _("\
5250 When this permission is on, GDB may insert tracepoints in the program.\n\
5251 Otherwise, any sort of insertion attempt will result in an error."),
5252 set_target_permissions, NULL,
5253 &setlist, &showlist);
5254
5255 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
5256 &may_insert_fast_tracepoints_1, _("\
5257 Set permission to insert fast tracepoints in the target."), _("\
5258 Show permission to insert fast tracepoints in the target."), _("\
5259 When this permission is on, GDB may insert fast tracepoints.\n\
5260 Otherwise, any sort of insertion attempt will result in an error."),
5261 set_target_permissions, NULL,
5262 &setlist, &showlist);
5263
5264 add_setshow_boolean_cmd ("may-interrupt", class_support,
5265 &may_stop_1, _("\
5266 Set permission to interrupt or signal the target."), _("\
5267 Show permission to interrupt or signal the target."), _("\
5268 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5269 Otherwise, any attempt to interrupt or stop will be ignored."),
5270 set_target_permissions, NULL,
5271 &setlist, &showlist);
5272 }