remove target_ignore
[binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47 #include "auxv.h"
48
49 static void target_info (char *, int);
50
51 static void default_terminal_info (struct target_ops *, const char *, int);
52
53 static int default_watchpoint_addr_within_range (struct target_ops *,
54 CORE_ADDR, CORE_ADDR, int);
55
56 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
57 CORE_ADDR, int);
58
59 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
60
61 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
62 long lwp, long tid);
63
64 static int default_follow_fork (struct target_ops *self, int follow_child,
65 int detach_fork);
66
67 static void default_mourn_inferior (struct target_ops *self);
68
69 static int default_search_memory (struct target_ops *ops,
70 CORE_ADDR start_addr,
71 ULONGEST search_space_len,
72 const gdb_byte *pattern,
73 ULONGEST pattern_len,
74 CORE_ADDR *found_addrp);
75
76 static void tcomplain (void) ATTRIBUTE_NORETURN;
77
78 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
79
80 static int return_zero (struct target_ops *);
81
82 static int return_zero_has_execution (struct target_ops *, ptid_t);
83
84 static void target_command (char *, int);
85
86 static struct target_ops *find_default_run_target (char *);
87
88 static target_xfer_partial_ftype default_xfer_partial;
89
90 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
91 ptid_t ptid);
92
93 static int dummy_find_memory_regions (struct target_ops *self,
94 find_memory_region_ftype ignore1,
95 void *ignore2);
96
97 static char *dummy_make_corefile_notes (struct target_ops *self,
98 bfd *ignore1, int *ignore2);
99
100 static char *default_pid_to_str (struct target_ops *ops, ptid_t ptid);
101
102 static int find_default_can_async_p (struct target_ops *ignore);
103
104 static int find_default_is_async_p (struct target_ops *ignore);
105
106 static enum exec_direction_kind default_execution_direction
107 (struct target_ops *self);
108
109 static CORE_ADDR default_target_decr_pc_after_break (struct target_ops *ops,
110 struct gdbarch *gdbarch);
111
112 #include "target-delegates.c"
113
114 static void init_dummy_target (void);
115
116 static struct target_ops debug_target;
117
118 static void debug_to_open (char *, int);
119
120 static void debug_to_prepare_to_store (struct target_ops *self,
121 struct regcache *);
122
123 static void debug_to_files_info (struct target_ops *);
124
125 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
126 struct bp_target_info *);
127
128 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
129 struct bp_target_info *);
130
131 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
132 int, int, int);
133
134 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
135 struct gdbarch *,
136 struct bp_target_info *);
137
138 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
139 struct gdbarch *,
140 struct bp_target_info *);
141
142 static int debug_to_insert_watchpoint (struct target_ops *self,
143 CORE_ADDR, int, int,
144 struct expression *);
145
146 static int debug_to_remove_watchpoint (struct target_ops *self,
147 CORE_ADDR, int, int,
148 struct expression *);
149
150 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
151
152 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
153 CORE_ADDR, CORE_ADDR, int);
154
155 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
156 CORE_ADDR, int);
157
158 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
159 CORE_ADDR, int, int,
160 struct expression *);
161
162 static void debug_to_terminal_init (struct target_ops *self);
163
164 static void debug_to_terminal_inferior (struct target_ops *self);
165
166 static void debug_to_terminal_ours_for_output (struct target_ops *self);
167
168 static void debug_to_terminal_save_ours (struct target_ops *self);
169
170 static void debug_to_terminal_ours (struct target_ops *self);
171
172 static void debug_to_load (struct target_ops *self, char *, int);
173
174 static int debug_to_can_run (struct target_ops *self);
175
176 static void debug_to_stop (struct target_ops *self, ptid_t);
177
178 /* Pointer to array of target architecture structures; the size of the
179 array; the current index into the array; the allocated size of the
180 array. */
181 struct target_ops **target_structs;
182 unsigned target_struct_size;
183 unsigned target_struct_allocsize;
184 #define DEFAULT_ALLOCSIZE 10
185
186 /* The initial current target, so that there is always a semi-valid
187 current target. */
188
189 static struct target_ops dummy_target;
190
191 /* Top of target stack. */
192
193 static struct target_ops *target_stack;
194
195 /* The target structure we are currently using to talk to a process
196 or file or whatever "inferior" we have. */
197
198 struct target_ops current_target;
199
200 /* Command list for target. */
201
202 static struct cmd_list_element *targetlist = NULL;
203
204 /* Nonzero if we should trust readonly sections from the
205 executable when reading memory. */
206
207 static int trust_readonly = 0;
208
209 /* Nonzero if we should show true memory content including
210 memory breakpoint inserted by gdb. */
211
212 static int show_memory_breakpoints = 0;
213
214 /* These globals control whether GDB attempts to perform these
215 operations; they are useful for targets that need to prevent
216 inadvertant disruption, such as in non-stop mode. */
217
218 int may_write_registers = 1;
219
220 int may_write_memory = 1;
221
222 int may_insert_breakpoints = 1;
223
224 int may_insert_tracepoints = 1;
225
226 int may_insert_fast_tracepoints = 1;
227
228 int may_stop = 1;
229
230 /* Non-zero if we want to see trace of target level stuff. */
231
232 static unsigned int targetdebug = 0;
/* Implement the "show debug target" command: print the current value
   of the targetdebug setting to FILE.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
239
240 static void setup_target_debug (void);
241
/* The user just typed 'target' without the name of a target.  Remind
   them that an argument is required; this is the handler installed
   for the bare "target" prefix command.  */

static void
target_command (char *arg, int from_tty)
{
  fputs_filtered ("Argument required (target name). Try `help target'\n",
		  gdb_stdout);
}
250
251 /* Default target_has_* methods for process_stratum targets. */
252
253 int
254 default_child_has_all_memory (struct target_ops *ops)
255 {
256 /* If no inferior selected, then we can't read memory here. */
257 if (ptid_equal (inferior_ptid, null_ptid))
258 return 0;
259
260 return 1;
261 }
262
263 int
264 default_child_has_memory (struct target_ops *ops)
265 {
266 /* If no inferior selected, then we can't read memory here. */
267 if (ptid_equal (inferior_ptid, null_ptid))
268 return 0;
269
270 return 1;
271 }
272
273 int
274 default_child_has_stack (struct target_ops *ops)
275 {
276 /* If no inferior selected, there's no stack. */
277 if (ptid_equal (inferior_ptid, null_ptid))
278 return 0;
279
280 return 1;
281 }
282
283 int
284 default_child_has_registers (struct target_ops *ops)
285 {
286 /* Can't read registers from no inferior. */
287 if (ptid_equal (inferior_ptid, null_ptid))
288 return 0;
289
290 return 1;
291 }
292
293 int
294 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
295 {
296 /* If there's no thread selected, then we can't make it run through
297 hoops. */
298 if (ptid_equal (the_ptid, null_ptid))
299 return 0;
300
301 return 1;
302 }
303
304
305 int
306 target_has_all_memory_1 (void)
307 {
308 struct target_ops *t;
309
310 for (t = current_target.beneath; t != NULL; t = t->beneath)
311 if (t->to_has_all_memory (t))
312 return 1;
313
314 return 0;
315 }
316
317 int
318 target_has_memory_1 (void)
319 {
320 struct target_ops *t;
321
322 for (t = current_target.beneath; t != NULL; t = t->beneath)
323 if (t->to_has_memory (t))
324 return 1;
325
326 return 0;
327 }
328
329 int
330 target_has_stack_1 (void)
331 {
332 struct target_ops *t;
333
334 for (t = current_target.beneath; t != NULL; t = t->beneath)
335 if (t->to_has_stack (t))
336 return 1;
337
338 return 0;
339 }
340
341 int
342 target_has_registers_1 (void)
343 {
344 struct target_ops *t;
345
346 for (t = current_target.beneath; t != NULL; t = t->beneath)
347 if (t->to_has_registers (t))
348 return 1;
349
350 return 0;
351 }
352
353 int
354 target_has_execution_1 (ptid_t the_ptid)
355 {
356 struct target_ops *t;
357
358 for (t = current_target.beneath; t != NULL; t = t->beneath)
359 if (t->to_has_execution (t, the_ptid))
360 return 1;
361
362 return 0;
363 }
364
/* Convenience wrapper: like target_has_execution_1, but for the
   current inferior (inferior_ptid).  */

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
370
/* Complete initialization of T.  This ensures that various fields in
   T are set, if needed by the target implementation.  */

void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = return_zero_has_execution;

  /* Fill in the remaining unset methods with delegators (generated
     by make-target-delegates, included via target-delegates.c).  */
  install_delegators (t);
}
398
/* Add possible target architecture T to the list and add a new
   command 'target T->to_shortname'.  Set COMPLETER as the command's
   completer if not NULL.  */

void
add_target_with_completer (struct target_ops *t,
			   completer_ftype *completer)
{
  struct cmd_list_element *c;

  complete_target_initialization (t);

  /* Create the registry of known targets on first use, then grow it
     by doubling whenever it fills up.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* The first registration also creates the "target" prefix command
     itself.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol. For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
	       &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
439
/* Add a possible target architecture to the list.  Same as
   add_target_with_completer with no completer.  */

void
add_target (struct target_ops *t)
{
  add_target_with_completer (t, NULL);
}
447
/* See target.h.  */

/* Register ALIAS as a deprecated alias for T's "target" subcommand,
   so using it warns and suggests "target T->to_shortname".  */

void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  alt = xstrprintf ("target %s", t->to_shortname);
  /* NOTE(review): ALT is not freed here; presumably deprecate_cmd
     retains ownership of the replacement string -- confirm.  */
  deprecate_cmd (c, alt);
}
462
/* Stub functions */

/* Kill the inferior, delegating to the current target's to_kill
   method.  Logs the call first when target debugging is enabled.  */

void
target_kill (void)
{
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");

  current_target.to_kill (&current_target);
}
473
/* Load ARG into the target via the current target's to_load method.
   The target dcache is invalidated first so stale cached memory is
   not reused after the load.  */

void
target_load (char *arg, int from_tty)
{
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
480
/* Start a new inferior running EXEC_FILE with arguments ARGS and
   environment ENV.  The topmost target on the stack that implements
   to_create_inferior handles the request; it is an internal error if
   no target does.  */

void
target_create_inferior (char *exec_file, char *args,
			char **env, int from_tty)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_create_inferior != NULL)
	{
	  t->to_create_inferior (t, exec_file, args, env, from_tty);
	  /* Log after the call, once we know a target handled it.  */
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_create_inferior (%s, %s, xxx, %d)\n",
				exec_file, args, from_tty);
	  return;
	}
    }

  internal_error (__FILE__, __LINE__,
		  _("could not find a target to create inferior"));
}
503
/* Give the inferior control of the terminal, unless this is a
   background resume in which case GDB keeps the terminal.  */

void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal. Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet. However, if sync_execution
     is not set, we know it will become async prior to resume. */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes. */
  (*current_target.to_terminal_inferior) (&current_target);
}
518
/* Memory-transfer fallback that always fails: sets errno to EIO and
   reports zero bytes transferred.  Installed as the default for
   deprecated_xfer_memory in update_current_target.  */

static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
526
/* Error out because the current target cannot perform the requested
   operation.  Does not return.  */

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
533
/* Error out because the requested operation needs a live process and
   there is none.  Does not return.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
539
/* Default to_terminal_info implementation: there is no saved terminal
   state to report.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
545
/* A default implementation for the to_get_ada_task_ptid target method.

   This function builds the PTID by using both LWP and TID as part of
   the PTID lwp and tid elements.  The pid used is the pid of the
   inferior_ptid.  */

static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
557
558 static enum exec_direction_kind
559 default_execution_direction (struct target_ops *self)
560 {
561 if (!target_can_execute_reverse)
562 return EXEC_FORWARD;
563 else if (!target_can_async_p ())
564 return EXEC_FORWARD;
565 else
566 gdb_assert_not_reached ("\
567 to_execution_direction must be implemented for reverse async");
568 }
569
/* Go through the target stack from top to bottom, copying over zero
   entries in current_target, then filling in still empty entries.  In
   effect, we are doing class inheritance through the pushed target
   vectors.

   NOTE: cagney/2003-10-17: The problem with this inheritance, as it
   is currently implemented, is that it discards any knowledge of
   which target an inherited method originally belonged to.
   Consequently, new new target methods should instead explicitly and
   locally search the target stack for the target that can handle the
   request.  */

static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  current_target.to_stratum = target_stack->to_stratum;

  /* Copy FIELD from TARGET into current_target unless it is already
     set -- the topmost target providing a field wins.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  /* Do not add any new INHERITs here.  Instead, use the delegation
     mechanism provided by make-target-delegates.  */
  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_attach_no_wait, t);
      INHERIT (deprecated_xfer_memory, t);
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      INHERIT (to_has_thread_control, t);
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Do not add any new de_faults here.  Instead, use the
     delegation mechanism provided by make-target-delegates.  */

#define de_fault(field, value) \
    if (!current_target.field)		\
      current_target.field = value

  /* The one remaining non-delegated method: if nothing on the stack
     supplied deprecated_xfer_memory, fail all transfers with EIO.  */
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
636
/* Push a new target type into the stack of the existing target accessors,
   possibly superseding some of the existing accessors.

   Rather than allow an empty stack, we always have the dummy target at
   the bottom stratum, so we can call the function vectors without
   checking them.  */

void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  The stack is
     kept ordered by stratum, highest first; CUR ends up pointing at
     the link where T belongs.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
	 and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Re-derive current_target from the updated stack.  */
  update_current_target ();
}
687
/* Remove a target_ops vector from the stack, wherever it may be.
   Return how many times it was removed (0 or 1).  */

int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target is the permanent stack bottom; removing it would
     leave the stack empty.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
729
/* Unpush targets from the top of the stack until the topmost target's
   stratum is no higher than ABOVE_STRATUM.  */

void
pop_all_targets_above (enum strata above_stratum)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      /* unpush_target refreshes current_target, so the loop condition
	 sees the new top of stack each iteration.  */
      if (!unpush_target (target_stack))
	{
	  fprintf_unfiltered (gdb_stderr,
			      "pop_all_targets couldn't find target %s\n",
			      target_stack->to_shortname);
	  internal_error (__FILE__, __LINE__,
			  _("failed internal consistency check"));
	  break;
	}
    }
}
746
/* Unpush every target above the bottom-most dummy target.  */

void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
752
753 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
754
755 int
756 target_is_pushed (struct target_ops *t)
757 {
758 struct target_ops **cur;
759
760 /* Check magic number. If wrong, it probably means someone changed
761 the struct definition, but not all the places that initialize one. */
762 if (t->to_magic != OPS_MAGIC)
763 {
764 fprintf_unfiltered (gdb_stderr,
765 "Magic number of %s target struct wrong\n",
766 t->to_shortname);
767 internal_error (__FILE__, __LINE__,
768 _("failed internal consistency check"));
769 }
770
771 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
772 if (*cur == t)
773 return 1;
774
775 return 0;
776 }
777
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  Throws
   an error if the address cannot be determined.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile so the value assigned inside TRY_CATCH is still valid
     after the setjmp/longjmp-based exception machinery.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the topmost target that knows how to compute a thread-local
     address.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Unrecognized error: re-throw to a higher catcher.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
874
875 const char *
876 target_xfer_status_to_string (enum target_xfer_status status)
877 {
878 #define CASE(X) case X: return #X
879 switch (status)
880 {
881 CASE(TARGET_XFER_E_IO);
882 CASE(TARGET_XFER_UNAVAILABLE);
883 default:
884 return "<unknown>";
885 }
886 #undef CASE
887 };
888
889
890 #undef MIN
891 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
892
/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];		/* Scratch for one aligned 4-byte chunk.  */
  int errcode = 0;
  char *buffer;			/* Growable result buffer.  */
  int buffer_allocated;
  char *bufptr;			/* Next free byte in BUFFER.  */
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read up to the next 4-byte alignment boundary, via one
	 aligned 4-byte read starting at MEMADDR rounded down.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow BUFFER (doubling) if this chunk would overflow it.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes into BUFFER, stopping at (and counting) a NUL
	 terminator.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
done:
  /* BUFFER is handed to the caller even on error; the caller owns and
     frees it.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
965
/* Return TARGET's section table via its to_get_section_table method,
   logging first when target debugging is enabled.  */

struct target_section_table *
target_get_section_table (struct target_ops *target)
{
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");

  return (*target->to_get_section_table) (target);
}
974
975 /* Find a section containing ADDR. */
976
977 struct target_section *
978 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
979 {
980 struct target_section_table *table = target_get_section_table (target);
981 struct target_section *secp;
982
983 if (table == NULL)
984 return NULL;
985
986 for (secp = table->sections; secp < table->sections_end; secp++)
987 {
988 if (addr >= secp->addr && addr < secp->endaddr)
989 return secp;
990 }
991 return NULL;
992 }
993
/* Read memory from the live target, even if currently inspecting a
   traceframe.  The return is the same as that of target_read.  */

static enum target_xfer_status
target_read_live_memory (enum target_object object,
			 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
			 ULONGEST *xfered_len)
{
  enum target_xfer_status ret;
  struct cleanup *cleanup;

  /* Switch momentarily out of tfind mode so to access live memory.
     Note that this must not clear global state, such as the frame
     cache, which must still remain valid for the previous traceframe.
     We may be _building_ the frame cache at this point.  */
  cleanup = make_cleanup_restore_traceframe_number ();
  set_traceframe_number (-1);

  ret = target_xfer_partial (current_target.beneath, object, NULL,
			     myaddr, NULL, memaddr, len, xfered_len);

  /* Restore the previously selected traceframe.  */
  do_cleanups (cleanup);
  return ret;
}
1018
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed if MEMADDR falls in a section flagged read-only.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  /* Nothing read-only covered the request.  */
  return TARGET_XFER_EOF;
}
1074
/* Read memory from more than one valid target.  A core file, for
   instance, could have some of memory but delegate other bits to
   the target below it.  So, we must manually try all targets.  */

static enum target_xfer_status
raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
			 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
			 ULONGEST *xfered_len)
{
  enum target_xfer_status res;

  do
    {
      res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				  readbuf, writebuf, memaddr, len,
				  xfered_len);
      /* Success: no need to try targets further down.  */
      if (res == TARGET_XFER_OK)
	break;

      /* Stop if the target reports that the memory is not available.  */
      if (res == TARGET_XFER_UNAVAILABLE)
	break;

      /* We want to continue past core files to executables, but not
	 past a running target's memory.  */
      if (ops->to_has_all_memory (ops))
	break;

      /* Otherwise fall through to the next target beneath.  */
      ops = ops->beneath;
    }
  while (ops != NULL);

  return res;
}
1109
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   This is the memory-specific front end: it consults overlays,
   trusted read-only sections, traceframe availability, memory-region
   attributes and GDB's data cache before falling back to the raw
   target stack.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  /* Translate the unmapped (load) address to the mapped one
	     before reading the backing file.  */
	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Shrink the request so it stops where the
		     traceframe's collected memory begins.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  Clamp the request
     so it does not run past the region's end; REG_LEN is the length
     actually attempted below.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Honor the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  /* The dcache paths below only apply when there is a current
     inferior.  */
  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1320
1321 /* Perform a partial memory transfer. For docs see target.h,
1322 to_xfer_partial. */
1323
1324 static enum target_xfer_status
1325 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1326 gdb_byte *readbuf, const gdb_byte *writebuf,
1327 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1328 {
1329 enum target_xfer_status res;
1330
1331 /* Zero length requests are ok and require no work. */
1332 if (len == 0)
1333 return TARGET_XFER_EOF;
1334
1335 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1336 breakpoint insns, thus hiding out from higher layers whether
1337 there are software breakpoints inserted in the code stream. */
1338 if (readbuf != NULL)
1339 {
1340 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1341 xfered_len);
1342
1343 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1344 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1345 }
1346 else
1347 {
1348 void *buf;
1349 struct cleanup *old_chain;
1350
1351 /* A large write request is likely to be partially satisfied
1352 by memory_xfer_partial_1. We will continually malloc
1353 and free a copy of the entire write request for breakpoint
1354 shadow handling even though we only end up writing a small
1355 subset of it. Cap writes to 4KB to mitigate this. */
1356 len = min (4096, len);
1357
1358 buf = xmalloc (len);
1359 old_chain = make_cleanup (xfree, buf);
1360 memcpy (buf, writebuf, len);
1361
1362 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1363 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1364 xfered_len);
1365
1366 do_cleanups (old_chain);
1367 }
1368
1369 return res;
1370 }
1371
1372 static void
1373 restore_show_memory_breakpoints (void *arg)
1374 {
1375 show_memory_breakpoints = (uintptr_t) arg;
1376 }
1377
1378 struct cleanup *
1379 make_show_memory_breakpoints_cleanup (int show)
1380 {
1381 int current = show_memory_breakpoints;
1382
1383 show_memory_breakpoints = show;
1384 return make_cleanup (restore_show_memory_breakpoints,
1385 (void *) (uintptr_t) current);
1386 }
1387
1388 /* For docs see target.h, to_xfer_partial. */
1389
enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Enforce the "set may-write-memory off" user setting before
     touching the target at all.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  /* With "set debug target" enabled, log the call and (at debug level
     2, or for the first 16-byte line at level 1) hex-dump the bytes
     transferred.  */
  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      /* Start a new output line at each 16-byte-aligned
		 address.  */
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1480
1481 /* Read LEN bytes of target memory at address MEMADDR, placing the
1482 results in GDB's memory at MYADDR. Returns either 0 for success or
1483 TARGET_XFER_E_IO if any error occurs.
1484
1485 If an error occurs, no guarantee is made about the contents of the data at
1486 MYADDR. In particular, the caller should not depend upon partial reads
1487 filling the buffer with good data. There is no way for the caller to know
1488 how much good data might have been transfered anyway. Callers that can
1489 deal with partial reads should call target_read (which will retry until
1490 it makes no progress, and then return how much was transferred). */
1491
1492 int
1493 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1494 {
1495 /* Dispatch to the topmost target, not the flattened current_target.
1496 Memory accesses check target->to_has_(all_)memory, and the
1497 flattened target doesn't inherit those. */
1498 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1499 myaddr, memaddr, len) == len)
1500 return 0;
1501 else
1502 return TARGET_XFER_E_IO;
1503 }
1504
1505 /* Like target_read_memory, but specify explicitly that this is a read
1506 from the target's raw memory. That is, this read bypasses the
1507 dcache, breakpoint shadowing, etc. */
1508
1509 int
1510 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1511 {
1512 /* See comment in target_read_memory about why the request starts at
1513 current_target.beneath. */
1514 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1515 myaddr, memaddr, len) == len)
1516 return 0;
1517 else
1518 return TARGET_XFER_E_IO;
1519 }
1520
1521 /* Like target_read_memory, but specify explicitly that this is a read from
1522 the target's stack. This may trigger different cache behavior. */
1523
1524 int
1525 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1526 {
1527 /* See comment in target_read_memory about why the request starts at
1528 current_target.beneath. */
1529 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1530 myaddr, memaddr, len) == len)
1531 return 0;
1532 else
1533 return TARGET_XFER_E_IO;
1534 }
1535
1536 /* Like target_read_memory, but specify explicitly that this is a read from
1537 the target's code. This may trigger different cache behavior. */
1538
1539 int
1540 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1541 {
1542 /* See comment in target_read_memory about why the request starts at
1543 current_target.beneath. */
1544 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1545 myaddr, memaddr, len) == len)
1546 return 0;
1547 else
1548 return TARGET_XFER_E_IO;
1549 }
1550
1551 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1552 Returns either 0 for success or TARGET_XFER_E_IO if any
1553 error occurs. If an error occurs, no guarantee is made about how
1554 much data got written. Callers that can deal with partial writes
1555 should call target_write. */
1556
1557 int
1558 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1559 {
1560 /* See comment in target_read_memory about why the request starts at
1561 current_target.beneath. */
1562 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1563 myaddr, memaddr, len) == len)
1564 return 0;
1565 else
1566 return TARGET_XFER_E_IO;
1567 }
1568
1569 /* Write LEN bytes from MYADDR to target raw memory at address
1570 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1571 if any error occurs. If an error occurs, no guarantee is made
1572 about how much data got written. Callers that can deal with
1573 partial writes should call target_write. */
1574
1575 int
1576 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1577 {
1578 /* See comment in target_read_memory about why the request starts at
1579 current_target.beneath. */
1580 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1581 myaddr, memaddr, len) == len)
1582 return 0;
1583 else
1584 return TARGET_XFER_E_IO;
1585 }
1586
1587 /* Fetch the target's memory map. */
1588
1589 VEC(mem_region_s) *
1590 target_memory_map (void)
1591 {
1592 VEC(mem_region_s) *result;
1593 struct mem_region *last_one, *this_one;
1594 int ix;
1595 struct target_ops *t;
1596
1597 if (targetdebug)
1598 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1599
1600 result = current_target.to_memory_map (&current_target);
1601 if (result == NULL)
1602 return NULL;
1603
1604 qsort (VEC_address (mem_region_s, result),
1605 VEC_length (mem_region_s, result),
1606 sizeof (struct mem_region), mem_region_cmp);
1607
1608 /* Check that regions do not overlap. Simultaneously assign
1609 a numbering for the "mem" commands to use to refer to
1610 each region. */
1611 last_one = NULL;
1612 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1613 {
1614 this_one->number = ix;
1615
1616 if (last_one && last_one->hi > this_one->lo)
1617 {
1618 warning (_("Overlapping regions in memory map: ignoring"));
1619 VEC_free (mem_region_s, result);
1620 return NULL;
1621 }
1622 last_one = this_one;
1623 }
1624
1625 return result;
1626 }
1627
/* Erase LENGTH bytes of the target's flash memory starting at
   ADDRESS, by delegating to the target vector's to_flash_erase
   method.  */

void
target_flash_erase (ULONGEST address, LONGEST length)
{
  /* Trace the call when "set debug target" is on.  */
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
                        hex_string (address), phex (length, 0));
  current_target.to_flash_erase (&current_target, address, length);
}
1636
/* Tell the target that flash programming is finished, by delegating
   to the target vector's to_flash_done method.  */

void
target_flash_done (void)
{
  /* Trace the call when "set debug target" is on.  */
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
  current_target.to_flash_done (&current_target);
}
1644
/* "show trust-readonly-sections" callback: print the current setting
   VALUE to FILE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1653
1654 /* More generic transfers. */
1655
1656 static enum target_xfer_status
1657 default_xfer_partial (struct target_ops *ops, enum target_object object,
1658 const char *annex, gdb_byte *readbuf,
1659 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1660 ULONGEST *xfered_len)
1661 {
1662 if (object == TARGET_OBJECT_MEMORY
1663 && ops->deprecated_xfer_memory != NULL)
1664 /* If available, fall back to the target's
1665 "deprecated_xfer_memory" method. */
1666 {
1667 int xfered = -1;
1668
1669 errno = 0;
1670 if (writebuf != NULL)
1671 {
1672 void *buffer = xmalloc (len);
1673 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1674
1675 memcpy (buffer, writebuf, len);
1676 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1677 1/*write*/, NULL, ops);
1678 do_cleanups (cleanup);
1679 }
1680 if (readbuf != NULL)
1681 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1682 0/*read*/, NULL, ops);
1683 if (xfered > 0)
1684 {
1685 *xfered_len = (ULONGEST) xfered;
1686 return TARGET_XFER_E_IO;
1687 }
1688 else if (xfered == 0 && errno == 0)
1689 /* "deprecated_xfer_memory" uses 0, cross checked against
1690 ERRNO as one indication of an error. */
1691 return TARGET_XFER_EOF;
1692 else
1693 return TARGET_XFER_E_IO;
1694 }
1695 else
1696 {
1697 gdb_assert (ops->beneath != NULL);
1698 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1699 readbuf, writebuf, offset, len,
1700 xfered_len);
1701 }
1702 }
1703
1704 /* Target vector read/write partial wrapper functions. */
1705
/* Read-only wrapper around target_xfer_partial: passes BUF as the
   read buffer and NULL for the write buffer.  */

static enum target_xfer_status
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
			      xfered_len);
}
1716
/* Write-only wrapper around target_xfer_partial: passes BUF as the
   write buffer and NULL for the read buffer.  */

static enum target_xfer_status
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
			      xfered_len);
}
1726
1727 /* Wrappers to perform the full transfer. */
1728
1729 /* For docs on target_read see target.h. */
1730
1731 LONGEST
1732 target_read (struct target_ops *ops,
1733 enum target_object object,
1734 const char *annex, gdb_byte *buf,
1735 ULONGEST offset, LONGEST len)
1736 {
1737 LONGEST xfered = 0;
1738
1739 while (xfered < len)
1740 {
1741 ULONGEST xfered_len;
1742 enum target_xfer_status status;
1743
1744 status = target_read_partial (ops, object, annex,
1745 (gdb_byte *) buf + xfered,
1746 offset + xfered, len - xfered,
1747 &xfered_len);
1748
1749 /* Call an observer, notifying them of the xfer progress? */
1750 if (status == TARGET_XFER_EOF)
1751 return xfered;
1752 else if (status == TARGET_XFER_OK)
1753 {
1754 xfered += xfered_len;
1755 QUIT;
1756 }
1757 else
1758 return -1;
1759
1760 }
1761 return len;
1762 }
1763
1764 /* Assuming that the entire [begin, end) range of memory cannot be
1765 read, try to read whatever subrange is possible to read.
1766
1767 The function returns, in RESULT, either zero or one memory block.
1768 If there's a readable subrange at the beginning, it is completely
1769 read and returned. Any further readable subrange will not be read.
1770 Otherwise, if there's a readable subrange at the end, it will be
1771 completely read and returned. Any readable subranges before it
1772 (obviously, not starting at the beginning), will be ignored. In
1773 other cases -- either no readable subrange, or readable subrange(s)
1774 that is neither at the beginning, or end, nothing is returned.
1775
1776 The purpose of this function is to handle a read across a boundary
1777 of accessible memory in a case when memory map is not available.
1778 The above restrictions are fine for this case, but will give
1779 incorrect results if the memory is 'patchy'. However, supporting
1780 'patchy' memory would require trying to read every single byte,
1781 and it seems unacceptable solution. Explicit memory map is
1782 recommended for this case -- and target_read_memory_robust will
1783 take care of reading multiple ranges then. */
1784
static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;			/* Non-zero: readable part is at BEGIN.  */
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Binary search for the boundary between readable and unreadable
     memory.

     Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* The "first half" is always the one adjacent to the byte we
	 already know is readable.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the next
	     iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  BUF already
	 holds it, so hand BUF's ownership to the result.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy just that
	 tail out of BUF into a right-sized buffer.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
1898
1899 void
1900 free_memory_read_result_vector (void *x)
1901 {
1902 VEC(memory_read_result_s) *v = x;
1903 memory_read_result_s *current;
1904 int ix;
1905
1906 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
1907 {
1908 xfree (current->data);
1909 }
1910 VEC_free (memory_read_result_s, v);
1911 }
1912
/* Read LEN bytes at OFFSET, region by region, collecting every
   readable block into the returned vector.  Unreadable stretches are
   skipped; partial failures fall back to read_whatever_is_readable.  */

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      /* region->hi == 0 means no upper bound.  */
      if (region->hi == 0)
	rlen = len - xfered;
      else
	rlen = region->hi - offset;
	/* NOTE(review): this subtracts OFFSET rather than
	   OFFSET + XFERED, so RLEN looks too large once XFERED > 0;
	   the min() below bounds the actual read, but verify against
	   upstream before changing.  */

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered += rlen;
	}
      else
	{
	  LONGEST to_read = min (len - xfered, rlen);
	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
				      (gdb_byte *) buffer,
				      offset + xfered, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfer <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  */
	      xfree (buffer);
	      read_whatever_is_readable (ops, offset + xfered,
					 offset + xfered + to_read, &result);
	      xfered += to_read;
	    }
	  else
	    {
	      /* Success (possibly partial): record the block; BUFFER's
		 ownership moves into the result vector.  */
	      struct memory_read_result r;
	      r.data = buffer;
	      r.begin = offset + xfered;
	      r.end = r.begin + xfer;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered += xfer;
	    }
	  QUIT;
	}
    }
  return result;
}
1971
1972
1973 /* An alternative to target_write with progress callbacks. */
1974
1975 LONGEST
1976 target_write_with_progress (struct target_ops *ops,
1977 enum target_object object,
1978 const char *annex, const gdb_byte *buf,
1979 ULONGEST offset, LONGEST len,
1980 void (*progress) (ULONGEST, void *), void *baton)
1981 {
1982 LONGEST xfered = 0;
1983
1984 /* Give the progress callback a chance to set up. */
1985 if (progress)
1986 (*progress) (0, baton);
1987
1988 while (xfered < len)
1989 {
1990 ULONGEST xfered_len;
1991 enum target_xfer_status status;
1992
1993 status = target_write_partial (ops, object, annex,
1994 (gdb_byte *) buf + xfered,
1995 offset + xfered, len - xfered,
1996 &xfered_len);
1997
1998 if (status != TARGET_XFER_OK)
1999 return status == TARGET_XFER_EOF ? xfered : -1;
2000
2001 if (progress)
2002 (*progress) (xfered_len, baton);
2003
2004 xfered += xfered_len;
2005 QUIT;
2006 }
2007 return len;
2008 }
2009
2010 /* For docs on target_write see target.h. */
2011
/* For docs on target_write see target.h.  A plain write is a
   progress-callback write with no callback.  */

LONGEST
target_write (struct target_ops *ops,
	      enum target_object object,
	      const char *annex, const gdb_byte *buf,
	      ULONGEST offset, LONGEST len)
{
  return target_write_with_progress (ops, object, annex, buf, offset, len,
				     NULL, NULL);
}
2021
2022 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2023 the size of the transferred data. PADDING additional bytes are
2024 available in *BUF_P. This is a helper function for
2025 target_read_alloc; see the declaration of that function for more
2026 information. */
2027
2028 static LONGEST
2029 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2030 const char *annex, gdb_byte **buf_p, int padding)
2031 {
2032 size_t buf_alloc, buf_pos;
2033 gdb_byte *buf;
2034
2035 /* This function does not have a length parameter; it reads the
2036 entire OBJECT). Also, it doesn't support objects fetched partly
2037 from one target and partly from another (in a different stratum,
2038 e.g. a core file and an executable). Both reasons make it
2039 unsuitable for reading memory. */
2040 gdb_assert (object != TARGET_OBJECT_MEMORY);
2041
2042 /* Start by reading up to 4K at a time. The target will throttle
2043 this number down if necessary. */
2044 buf_alloc = 4096;
2045 buf = xmalloc (buf_alloc);
2046 buf_pos = 0;
2047 while (1)
2048 {
2049 ULONGEST xfered_len;
2050 enum target_xfer_status status;
2051
2052 status = target_read_partial (ops, object, annex, &buf[buf_pos],
2053 buf_pos, buf_alloc - buf_pos - padding,
2054 &xfered_len);
2055
2056 if (status == TARGET_XFER_EOF)
2057 {
2058 /* Read all there was. */
2059 if (buf_pos == 0)
2060 xfree (buf);
2061 else
2062 *buf_p = buf;
2063 return buf_pos;
2064 }
2065 else if (status != TARGET_XFER_OK)
2066 {
2067 /* An error occurred. */
2068 xfree (buf);
2069 return TARGET_XFER_E_IO;
2070 }
2071
2072 buf_pos += xfered_len;
2073
2074 /* If the buffer is filling up, expand it. */
2075 if (buf_alloc < buf_pos * 2)
2076 {
2077 buf_alloc *= 2;
2078 buf = xrealloc (buf, buf_alloc);
2079 }
2080
2081 QUIT;
2082 }
2083 }
2084
2085 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2086 the size of the transferred data. See the declaration in "target.h"
2087 function for more information about the return value. */
2088
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  */

LONGEST
target_read_alloc (struct target_ops *ops, enum target_object object,
		   const char *annex, gdb_byte **buf_p)
{
  /* No padding byte needed for a binary result.  */
  return target_read_alloc_1 (ops, object, annex, buf_p, 0);
}
2095
2096 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2097 returned as a string, allocated using xmalloc. If an error occurs
2098 or the transfer is unsupported, NULL is returned. Empty objects
2099 are returned as allocated but empty strings. A warning is issued
2100 if the result contains any embedded NUL bytes. */
2101
2102 char *
2103 target_read_stralloc (struct target_ops *ops, enum target_object object,
2104 const char *annex)
2105 {
2106 gdb_byte *buffer;
2107 char *bufstr;
2108 LONGEST i, transferred;
2109
2110 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2111 bufstr = (char *) buffer;
2112
2113 if (transferred < 0)
2114 return NULL;
2115
2116 if (transferred == 0)
2117 return xstrdup ("");
2118
2119 bufstr[transferred] = 0;
2120
2121 /* Check for embedded NUL bytes; but allow trailing NULs. */
2122 for (i = strlen (bufstr); i < transferred; i++)
2123 if (bufstr[i] != 0)
2124 {
2125 warning (_("target object %d, annex %s, "
2126 "contained unexpected null characters"),
2127 (int) object, annex ? annex : "(none)");
2128 break;
2129 }
2130
2131 return bufstr;
2132 }
2133
2134 /* Memory transfer methods. */
2135
2136 void
2137 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2138 LONGEST len)
2139 {
2140 /* This method is used to read from an alternate, non-current
2141 target. This read must bypass the overlay support (as symbols
2142 don't match this target), and GDB's internal cache (wrong cache
2143 for this target). */
2144 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2145 != len)
2146 memory_error (TARGET_XFER_E_IO, addr);
2147 }
2148
2149 ULONGEST
2150 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2151 int len, enum bfd_endian byte_order)
2152 {
2153 gdb_byte buf[sizeof (ULONGEST)];
2154
2155 gdb_assert (len <= sizeof (buf));
2156 get_target_memory (ops, addr, buf, len);
2157 return extract_unsigned_integer (buf, len, byte_order);
2158 }
2159
2160 /* See target.h. */
2161
2162 int
2163 target_insert_breakpoint (struct gdbarch *gdbarch,
2164 struct bp_target_info *bp_tgt)
2165 {
2166 if (!may_insert_breakpoints)
2167 {
2168 warning (_("May not insert breakpoints"));
2169 return 1;
2170 }
2171
2172 return current_target.to_insert_breakpoint (&current_target,
2173 gdbarch, bp_tgt);
2174 }
2175
2176 /* See target.h. */
2177
2178 int
2179 target_remove_breakpoint (struct gdbarch *gdbarch,
2180 struct bp_target_info *bp_tgt)
2181 {
2182 /* This is kind of a weird case to handle, but the permission might
2183 have been changed after breakpoints were inserted - in which case
2184 we should just take the user literally and assume that any
2185 breakpoints should be left in place. */
2186 if (!may_insert_breakpoints)
2187 {
2188 warning (_("May not remove breakpoints"));
2189 return 1;
2190 }
2191
2192 return current_target.to_remove_breakpoint (&current_target,
2193 gdbarch, bp_tgt);
2194 }
2195
2196 static void
2197 target_info (char *args, int from_tty)
2198 {
2199 struct target_ops *t;
2200 int has_all_mem = 0;
2201
2202 if (symfile_objfile != NULL)
2203 printf_unfiltered (_("Symbols from \"%s\".\n"),
2204 objfile_name (symfile_objfile));
2205
2206 for (t = target_stack; t != NULL; t = t->beneath)
2207 {
2208 if (!(*t->to_has_memory) (t))
2209 continue;
2210
2211 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2212 continue;
2213 if (has_all_mem)
2214 printf_unfiltered (_("\tWhile running this, "
2215 "GDB does not access memory from...\n"));
2216 printf_unfiltered ("%s:\n", t->to_longname);
2217 (t->to_files_info) (t);
2218 has_all_mem = (*t->to_has_all_memory) (t);
2219 }
2220 }
2221
/* This function is called before any new inferior is created, e.g.
   by running a program, attaching, or connecting to a target.
   It cleans up any state from previous invocations which might
   change between runs.  This is a subset of what target_preopen
   resets (things which might change between targets).  */

void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* Drop the cached in-process agent capability; it is re-probed as
     needed for the new inferior.  */
  agent_capability_invalidate ();
}
2262
2263 /* Callback for iterate_over_inferiors. Gets rid of the given
2264 inferior. */
2265
2266 static int
2267 dispose_inferior (struct inferior *inf, void *args)
2268 {
2269 struct thread_info *thread;
2270
2271 thread = any_thread_of_process (inf->pid);
2272 if (thread)
2273 {
2274 switch_to_thread (thread->ptid);
2275
2276 /* Core inferiors actually should be detached, not killed. */
2277 if (target_has_execution)
2278 target_kill ();
2279 else
2280 target_detach (NULL, 0);
2281 }
2282
2283 return 0;
2284 }
2285
2286 /* This is to be called by the open routine before it does
2287 anything. */
2288
2289 void
2290 target_preopen (int from_tty)
2291 {
2292 dont_repeat ();
2293
2294 if (have_inferiors ())
2295 {
2296 if (!from_tty
2297 || !have_live_inferiors ()
2298 || query (_("A program is being debugged already. Kill it? ")))
2299 iterate_over_inferiors (dispose_inferior, NULL);
2300 else
2301 error (_("Program not killed."));
2302 }
2303
2304 /* Calling target_kill may remove the target from the stack. But if
2305 it doesn't (which seems like a win for UDI), remove it now. */
2306 /* Leave the exec target, though. The user may be switching from a
2307 live process to a core of the same program. */
2308 pop_all_targets_above (file_stratum);
2309
2310 target_pre_inferior (from_tty);
2311 }
2312
2313 /* Detach a target after doing deferred register stores. */
2314
2315 void
2316 target_detach (const char *args, int from_tty)
2317 {
2318 struct target_ops* t;
2319
2320 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2321 /* Don't remove global breakpoints here. They're removed on
2322 disconnection from the target. */
2323 ;
2324 else
2325 /* If we're in breakpoints-always-inserted mode, have to remove
2326 them before detaching. */
2327 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2328
2329 prepare_for_detach ();
2330
2331 current_target.to_detach (&current_target, args, from_tty);
2332 if (targetdebug)
2333 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2334 args, from_tty);
2335 }
2336
2337 void
2338 target_disconnect (char *args, int from_tty)
2339 {
2340 /* If we're in breakpoints-always-inserted mode or if breakpoints
2341 are global across processes, we have to remove them before
2342 disconnecting. */
2343 remove_breakpoints ();
2344
2345 if (targetdebug)
2346 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2347 args, from_tty);
2348 current_target.to_disconnect (&current_target, args, from_tty);
2349 }
2350
2351 ptid_t
2352 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2353 {
2354 struct target_ops *t;
2355 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2356 status, options);
2357
2358 if (targetdebug)
2359 {
2360 char *status_string;
2361 char *options_string;
2362
2363 status_string = target_waitstatus_to_string (status);
2364 options_string = target_options_to_string (options);
2365 fprintf_unfiltered (gdb_stdlog,
2366 "target_wait (%d, status, options={%s})"
2367 " = %d, %s\n",
2368 ptid_get_pid (ptid), options_string,
2369 ptid_get_pid (retval), status_string);
2370 xfree (status_string);
2371 xfree (options_string);
2372 }
2373
2374 return retval;
2375 }
2376
/* Return a string describing PTID, using the current target's
   to_pid_to_str method.  The result may point to static storage;
   callers must not free it.  */

char *
target_pid_to_str (ptid_t ptid)
{
  return (*current_target.to_pid_to_str) (&current_target, ptid);
}
2382
/* Return the name of the thread described by INFO, as reported by the
   current target, or NULL if the target provides none.  */

char *
target_thread_name (struct thread_info *info)
{
  return current_target.to_thread_name (&current_target, info);
}
2388
2389 void
2390 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2391 {
2392 struct target_ops *t;
2393
2394 target_dcache_invalidate ();
2395
2396 current_target.to_resume (&current_target, ptid, step, signal);
2397 if (targetdebug)
2398 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2399 ptid_get_pid (ptid),
2400 step ? "step" : "continue",
2401 gdb_signal_to_name (signal));
2402
2403 registers_changed_ptid (ptid);
2404 set_executing (ptid, 1);
2405 set_running (ptid, 1);
2406 clear_inline_frame_state (ptid);
2407 }
2408
2409 void
2410 target_pass_signals (int numsigs, unsigned char *pass_signals)
2411 {
2412 if (targetdebug)
2413 {
2414 int i;
2415
2416 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2417 numsigs);
2418
2419 for (i = 0; i < numsigs; i++)
2420 if (pass_signals[i])
2421 fprintf_unfiltered (gdb_stdlog, " %s",
2422 gdb_signal_to_name (i));
2423
2424 fprintf_unfiltered (gdb_stdlog, " })\n");
2425 }
2426
2427 (*current_target.to_pass_signals) (&current_target, numsigs, pass_signals);
2428 }
2429
2430 void
2431 target_program_signals (int numsigs, unsigned char *program_signals)
2432 {
2433 if (targetdebug)
2434 {
2435 int i;
2436
2437 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2438 numsigs);
2439
2440 for (i = 0; i < numsigs; i++)
2441 if (program_signals[i])
2442 fprintf_unfiltered (gdb_stdlog, " %s",
2443 gdb_signal_to_name (i));
2444
2445 fprintf_unfiltered (gdb_stdlog, " })\n");
2446 }
2447
2448 (*current_target.to_program_signals) (&current_target,
2449 numsigs, program_signals);
2450 }
2451
/* Default implementation of to_follow_fork.  Reaching this means a
   target reported a fork event without providing a way to follow it,
   which is an internal inconsistency, so error out.  */

static int
default_follow_fork (struct target_ops *self, int follow_child,
		     int detach_fork)
{
  /* Some target returned a fork event, but did not know how to follow it.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to follow fork"));
}
2460
2461 /* Look through the list of possible targets for a target that can
2462 follow forks. */
2463
2464 int
2465 target_follow_fork (int follow_child, int detach_fork)
2466 {
2467 int retval = current_target.to_follow_fork (&current_target,
2468 follow_child, detach_fork);
2469
2470 if (targetdebug)
2471 fprintf_unfiltered (gdb_stdlog,
2472 "target_follow_fork (%d, %d) = %d\n",
2473 follow_child, detach_fork, retval);
2474 return retval;
2475 }
2476
/* Default implementation of to_mourn_inferior.  Reaching this means
   no target on the stack knew how to mourn the inferior, which is an
   internal inconsistency, so error out.  */

static void
default_mourn_inferior (struct target_ops *self)
{
  /* The previous message read "follow mourn inferior" -- a copy-paste
     from default_follow_fork.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to mourn inferior"));
}
2483
/* The inferior has exited or been killed; let the current target
   clean up its state, then release BFD handles.  */

void
target_mourn_inferior (void)
{
  current_target.to_mourn_inferior (&current_target);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");

  /* We no longer need to keep handles on any of the object files.
     Make sure to release them to avoid unnecessarily locking any
     of them while we're not actually debugging.  */
  bfd_cache_close_all ();
}
2496
/* Look for a target which can describe architectural features, starting
   from TARGET.  If we find one, return its description.  (The delegated
   method itself walks beneath TARGET as needed.)  */

const struct target_desc *
target_read_description (struct target_ops *target)
{
  return target->to_read_description (target);
}
2505
/* This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search on
   the target side with, for example, gdbserver).

   Returns 1 if PATTERN was found, 0 if not, and -1 on error; on
   success the match address is stored in *FOUND_ADDRP.  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* One chunk plus PATTERN_LEN - 1 bytes of overlap, so a match
     straddling a chunk boundary is still found.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* NOTE(review): raw malloc (not xmalloc) plus an explicit check --
     presumably so an over-large search fails with a user-visible
     error instead of aborting; confirm before changing.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  (The refill branch is
	     only reached when the buffer was full-sized, so KEEP_LEN is
	     exactly the overlap; the assert documents that invariant.)  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2612
/* Default implementation of memory-searching: read memory through the
   target stack and search on the GDB side.  */

static int
default_search_memory (struct target_ops *self,
		       CORE_ADDR start_addr, ULONGEST search_space_len,
		       const gdb_byte *pattern, ULONGEST pattern_len,
		       CORE_ADDR *found_addrp)
{
  /* Start over from the top of the target stack.  */
  return simple_search_memory (current_target.beneath,
			       start_addr, search_space_len,
			       pattern, pattern_len, found_addrp);
}
2626
2627 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2628 sequence of bytes in PATTERN with length PATTERN_LEN.
2629
2630 The result is 1 if found, 0 if not found, and -1 if there was an error
2631 requiring halting of the search (e.g. memory read error).
2632 If the pattern is found the address is recorded in FOUND_ADDRP. */
2633
2634 int
2635 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2636 const gdb_byte *pattern, ULONGEST pattern_len,
2637 CORE_ADDR *found_addrp)
2638 {
2639 int found;
2640
2641 if (targetdebug)
2642 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2643 hex_string (start_addr));
2644
2645 found = current_target.to_search_memory (&current_target, start_addr,
2646 search_space_len,
2647 pattern, pattern_len, found_addrp);
2648
2649 if (targetdebug)
2650 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2651
2652 return found;
2653 }
2654
2655 /* Look through the currently pushed targets. If none of them will
2656 be able to restart the currently running process, issue an error
2657 message. */
2658
2659 void
2660 target_require_runnable (void)
2661 {
2662 struct target_ops *t;
2663
2664 for (t = target_stack; t != NULL; t = t->beneath)
2665 {
2666 /* If this target knows how to create a new program, then
2667 assume we will still be able to after killing the current
2668 one. Either killing and mourning will not pop T, or else
2669 find_default_run_target will find it again. */
2670 if (t->to_create_inferior != NULL)
2671 return;
2672
2673 /* Do not worry about thread_stratum targets that can not
2674 create inferiors. Assume they will be pushed again if
2675 necessary, and continue to the process_stratum. */
2676 if (t->to_stratum == thread_stratum
2677 || t->to_stratum == arch_stratum)
2678 continue;
2679
2680 error (_("The \"%s\" target does not support \"run\". "
2681 "Try \"help target\" or \"continue\"."),
2682 t->to_shortname);
2683 }
2684
2685 /* This function is only called if the target is running. In that
2686 case there should have been a process_stratum target and it
2687 should either know how to create inferiors, or not... */
2688 internal_error (__FILE__, __LINE__, _("No targets found"));
2689 }
2690
2691 /* Look through the list of possible targets for a target that can
2692 execute a run or attach command without any other data. This is
2693 used to locate the default process stratum.
2694
2695 If DO_MESG is not NULL, the result is always valid (error() is
2696 called for errors); else, return NULL on error. */
2697
2698 static struct target_ops *
2699 find_default_run_target (char *do_mesg)
2700 {
2701 struct target_ops **t;
2702 struct target_ops *runable = NULL;
2703 int count;
2704
2705 count = 0;
2706
2707 for (t = target_structs; t < target_structs + target_struct_size;
2708 ++t)
2709 {
2710 if ((*t)->to_can_run != delegate_can_run && target_can_run (*t))
2711 {
2712 runable = *t;
2713 ++count;
2714 }
2715 }
2716
2717 if (count != 1)
2718 {
2719 if (do_mesg)
2720 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2721 else
2722 return NULL;
2723 }
2724
2725 return runable;
2726 }
2727
2728 void
2729 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2730 {
2731 struct target_ops *t;
2732
2733 t = find_default_run_target ("attach");
2734 (t->to_attach) (t, args, from_tty);
2735 return;
2736 }
2737
2738 void
2739 find_default_create_inferior (struct target_ops *ops,
2740 char *exec_file, char *allargs, char **env,
2741 int from_tty)
2742 {
2743 struct target_ops *t;
2744
2745 t = find_default_run_target ("run");
2746 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2747 return;
2748 }
2749
2750 static int
2751 find_default_can_async_p (struct target_ops *ignore)
2752 {
2753 struct target_ops *t;
2754
2755 /* This may be called before the target is pushed on the stack;
2756 look for the default process stratum. If there's none, gdb isn't
2757 configured with a native debugger, and target remote isn't
2758 connected yet. */
2759 t = find_default_run_target (NULL);
2760 if (t && t->to_can_async_p != delegate_can_async_p)
2761 return (t->to_can_async_p) (t);
2762 return 0;
2763 }
2764
2765 static int
2766 find_default_is_async_p (struct target_ops *ignore)
2767 {
2768 struct target_ops *t;
2769
2770 /* This may be called before the target is pushed on the stack;
2771 look for the default process stratum. If there's none, gdb isn't
2772 configured with a native debugger, and target remote isn't
2773 connected yet. */
2774 t = find_default_run_target (NULL);
2775 if (t && t->to_is_async_p != delegate_is_async_p)
2776 return (t->to_is_async_p) (t);
2777 return 0;
2778 }
2779
2780 static int
2781 find_default_supports_non_stop (struct target_ops *self)
2782 {
2783 struct target_ops *t;
2784
2785 t = find_default_run_target (NULL);
2786 if (t && t->to_supports_non_stop)
2787 return (t->to_supports_non_stop) (t);
2788 return 0;
2789 }
2790
2791 int
2792 target_supports_non_stop (void)
2793 {
2794 struct target_ops *t;
2795
2796 for (t = &current_target; t != NULL; t = t->beneath)
2797 if (t->to_supports_non_stop)
2798 return t->to_supports_non_stop (t);
2799
2800 return 0;
2801 }
2802
2803 /* Implement the "info proc" command. */
2804
2805 int
2806 target_info_proc (char *args, enum info_proc_what what)
2807 {
2808 struct target_ops *t;
2809
2810 /* If we're already connected to something that can get us OS
2811 related data, use it. Otherwise, try using the native
2812 target. */
2813 if (current_target.to_stratum >= process_stratum)
2814 t = current_target.beneath;
2815 else
2816 t = find_default_run_target (NULL);
2817
2818 for (; t != NULL; t = t->beneath)
2819 {
2820 if (t->to_info_proc != NULL)
2821 {
2822 t->to_info_proc (t, args, what);
2823
2824 if (targetdebug)
2825 fprintf_unfiltered (gdb_stdlog,
2826 "target_info_proc (\"%s\", %d)\n", args, what);
2827
2828 return 1;
2829 }
2830 }
2831
2832 return 0;
2833 }
2834
2835 static int
2836 find_default_supports_disable_randomization (struct target_ops *self)
2837 {
2838 struct target_ops *t;
2839
2840 t = find_default_run_target (NULL);
2841 if (t && t->to_supports_disable_randomization)
2842 return (t->to_supports_disable_randomization) (t);
2843 return 0;
2844 }
2845
2846 int
2847 target_supports_disable_randomization (void)
2848 {
2849 struct target_ops *t;
2850
2851 for (t = &current_target; t != NULL; t = t->beneath)
2852 if (t->to_supports_disable_randomization)
2853 return t->to_supports_disable_randomization (t);
2854
2855 return 0;
2856 }
2857
2858 char *
2859 target_get_osdata (const char *type)
2860 {
2861 struct target_ops *t;
2862
2863 /* If we're already connected to something that can get us OS
2864 related data, use it. Otherwise, try using the native
2865 target. */
2866 if (current_target.to_stratum >= process_stratum)
2867 t = current_target.beneath;
2868 else
2869 t = find_default_run_target ("get OS data");
2870
2871 if (!t)
2872 return NULL;
2873
2874 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
2875 }
2876
2877 /* Determine the current address space of thread PTID. */
2878
2879 struct address_space *
2880 target_thread_address_space (ptid_t ptid)
2881 {
2882 struct address_space *aspace;
2883 struct inferior *inf;
2884 struct target_ops *t;
2885
2886 for (t = current_target.beneath; t != NULL; t = t->beneath)
2887 {
2888 if (t->to_thread_address_space != NULL)
2889 {
2890 aspace = t->to_thread_address_space (t, ptid);
2891 gdb_assert (aspace);
2892
2893 if (targetdebug)
2894 fprintf_unfiltered (gdb_stdlog,
2895 "target_thread_address_space (%s) = %d\n",
2896 target_pid_to_str (ptid),
2897 address_space_num (aspace));
2898 return aspace;
2899 }
2900 }
2901
2902 /* Fall-back to the "main" address space of the inferior. */
2903 inf = find_inferior_pid (ptid_get_pid (ptid));
2904
2905 if (inf == NULL || inf->aspace == NULL)
2906 internal_error (__FILE__, __LINE__,
2907 _("Can't determine the current "
2908 "address space of thread %s\n"),
2909 target_pid_to_str (ptid));
2910
2911 return inf->aspace;
2912 }
2913
2914
2915 /* Target file operations. */
2916
2917 static struct target_ops *
2918 default_fileio_target (void)
2919 {
2920 /* If we're already connected to something that can perform
2921 file I/O, use it. Otherwise, try using the native target. */
2922 if (current_target.to_stratum >= process_stratum)
2923 return current_target.beneath;
2924 else
2925 return find_default_run_target ("file I/O");
2926 }
2927
2928 /* Open FILENAME on the target, using FLAGS and MODE. Return a
2929 target file descriptor, or -1 if an error occurs (and set
2930 *TARGET_ERRNO). */
2931 int
2932 target_fileio_open (const char *filename, int flags, int mode,
2933 int *target_errno)
2934 {
2935 struct target_ops *t;
2936
2937 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2938 {
2939 if (t->to_fileio_open != NULL)
2940 {
2941 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
2942
2943 if (targetdebug)
2944 fprintf_unfiltered (gdb_stdlog,
2945 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
2946 filename, flags, mode,
2947 fd, fd != -1 ? 0 : *target_errno);
2948 return fd;
2949 }
2950 }
2951
2952 *target_errno = FILEIO_ENOSYS;
2953 return -1;
2954 }
2955
2956 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
2957 Return the number of bytes written, or -1 if an error occurs
2958 (and set *TARGET_ERRNO). */
2959 int
2960 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
2961 ULONGEST offset, int *target_errno)
2962 {
2963 struct target_ops *t;
2964
2965 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2966 {
2967 if (t->to_fileio_pwrite != NULL)
2968 {
2969 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
2970 target_errno);
2971
2972 if (targetdebug)
2973 fprintf_unfiltered (gdb_stdlog,
2974 "target_fileio_pwrite (%d,...,%d,%s) "
2975 "= %d (%d)\n",
2976 fd, len, pulongest (offset),
2977 ret, ret != -1 ? 0 : *target_errno);
2978 return ret;
2979 }
2980 }
2981
2982 *target_errno = FILEIO_ENOSYS;
2983 return -1;
2984 }
2985
2986 /* Read up to LEN bytes FD on the target into READ_BUF.
2987 Return the number of bytes read, or -1 if an error occurs
2988 (and set *TARGET_ERRNO). */
2989 int
2990 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
2991 ULONGEST offset, int *target_errno)
2992 {
2993 struct target_ops *t;
2994
2995 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2996 {
2997 if (t->to_fileio_pread != NULL)
2998 {
2999 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
3000 target_errno);
3001
3002 if (targetdebug)
3003 fprintf_unfiltered (gdb_stdlog,
3004 "target_fileio_pread (%d,...,%d,%s) "
3005 "= %d (%d)\n",
3006 fd, len, pulongest (offset),
3007 ret, ret != -1 ? 0 : *target_errno);
3008 return ret;
3009 }
3010 }
3011
3012 *target_errno = FILEIO_ENOSYS;
3013 return -1;
3014 }
3015
3016 /* Close FD on the target. Return 0, or -1 if an error occurs
3017 (and set *TARGET_ERRNO). */
3018 int
3019 target_fileio_close (int fd, int *target_errno)
3020 {
3021 struct target_ops *t;
3022
3023 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3024 {
3025 if (t->to_fileio_close != NULL)
3026 {
3027 int ret = t->to_fileio_close (t, fd, target_errno);
3028
3029 if (targetdebug)
3030 fprintf_unfiltered (gdb_stdlog,
3031 "target_fileio_close (%d) = %d (%d)\n",
3032 fd, ret, ret != -1 ? 0 : *target_errno);
3033 return ret;
3034 }
3035 }
3036
3037 *target_errno = FILEIO_ENOSYS;
3038 return -1;
3039 }
3040
3041 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3042 occurs (and set *TARGET_ERRNO). */
3043 int
3044 target_fileio_unlink (const char *filename, int *target_errno)
3045 {
3046 struct target_ops *t;
3047
3048 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3049 {
3050 if (t->to_fileio_unlink != NULL)
3051 {
3052 int ret = t->to_fileio_unlink (t, filename, target_errno);
3053
3054 if (targetdebug)
3055 fprintf_unfiltered (gdb_stdlog,
3056 "target_fileio_unlink (%s) = %d (%d)\n",
3057 filename, ret, ret != -1 ? 0 : *target_errno);
3058 return ret;
3059 }
3060 }
3061
3062 *target_errno = FILEIO_ENOSYS;
3063 return -1;
3064 }
3065
3066 /* Read value of symbolic link FILENAME on the target. Return a
3067 null-terminated string allocated via xmalloc, or NULL if an error
3068 occurs (and set *TARGET_ERRNO). */
3069 char *
3070 target_fileio_readlink (const char *filename, int *target_errno)
3071 {
3072 struct target_ops *t;
3073
3074 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3075 {
3076 if (t->to_fileio_readlink != NULL)
3077 {
3078 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3079
3080 if (targetdebug)
3081 fprintf_unfiltered (gdb_stdlog,
3082 "target_fileio_readlink (%s) = %s (%d)\n",
3083 filename, ret? ret : "(nil)",
3084 ret? 0 : *target_errno);
3085 return ret;
3086 }
3087 }
3088
3089 *target_errno = FILEIO_ENOSYS;
3090 return NULL;
3091 }
3092
/* Cleanup callback that closes the target file descriptor pointed to
   by OPAQUE.  Any close error is deliberately ignored: this runs on
   error/quit paths where the original error must be preserved.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int fd = *(int *) opaque;
  int target_errno;

  target_fileio_close (fd, &target_errno);
}
3101
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Ensure FD is closed on every exit path, including QUIT below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Leave PADDING bytes of headroom so the caller can append
	 data (e.g. a terminating NUL) without reallocating.  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  NOTE: *BUF_P is only set when some
	     data was read; for an empty file the buffer is freed and
	     0 returned.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
3165
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data, or -1 on error.  See the
   declaration in "target.h" for more information about the return
   value.  The caller owns the buffer stored in *BUF_P.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* Final argument 0: no padding bytes reserved beyond the data.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3175
3176 /* Read target file FILENAME. The result is NUL-terminated and
3177 returned as a string, allocated using xmalloc. If an error occurs
3178 or the transfer is unsupported, NULL is returned. Empty objects
3179 are returned as allocated but empty strings. A warning is issued
3180 if the result contains any embedded NUL bytes. */
3181
3182 char *
3183 target_fileio_read_stralloc (const char *filename)
3184 {
3185 gdb_byte *buffer;
3186 char *bufstr;
3187 LONGEST i, transferred;
3188
3189 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3190 bufstr = (char *) buffer;
3191
3192 if (transferred < 0)
3193 return NULL;
3194
3195 if (transferred == 0)
3196 return xstrdup ("");
3197
3198 bufstr[transferred] = 0;
3199
3200 /* Check for embedded NUL bytes; but allow trailing NULs. */
3201 for (i = strlen (bufstr); i < transferred; i++)
3202 if (bufstr[i] != 0)
3203 {
3204 warning (_("target file %s "
3205 "contained unexpected null characters"),
3206 filename);
3207 break;
3208 }
3209
3210 return bufstr;
3211 }
3212
3213
/* Default implementation of to_region_ok_for_hw_watchpoint: allow
   watching any region no wider than a target pointer.  ADDR itself is
   not constrained.  */

static int
default_region_ok_for_hw_watchpoint (struct target_ops *self,
				     CORE_ADDR addr, int len)
{
  return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
}
3220
3221 static int
3222 default_watchpoint_addr_within_range (struct target_ops *target,
3223 CORE_ADDR addr,
3224 CORE_ADDR start, int length)
3225 {
3226 return addr >= start && addr < start + length;
3227 }
3228
/* Default implementation of to_thread_architecture: every thread uses
   the inferior's main architecture.  */

static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}
3234
/* Stub target method that always returns 0.  */

static int
return_zero (struct target_ops *ignore)
{
  return 0;
}
3240
/* Stub to_has_execution method that always returns 0 (no execution),
   regardless of the ptid argument.  */

static int
return_zero_has_execution (struct target_ops *ignore, ptid_t ignore2)
{
  return 0;
}
3246
/*
 * Find the next target down the stack from the specified target.
 * May return NULL if T is the bottom of the stack.
 */

struct target_ops *
find_target_beneath (struct target_ops *t)
{
  return t->beneath;
}
3256
3257 /* See target.h. */
3258
3259 struct target_ops *
3260 find_target_at (enum strata stratum)
3261 {
3262 struct target_ops *t;
3263
3264 for (t = current_target.beneath; t != NULL; t = t->beneath)
3265 if (t->to_stratum == stratum)
3266 return t;
3267
3268 return NULL;
3269 }
3270
3271 \f
/* The inferior process has died.  Long live the inferior!  Clears all
   per-inferior state: threads, breakpoints, registers, frame cache.
   The ordering of the calls below is significant; see the inline
   comments.  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  /* Give user code a chance to react to the inferior going away.  */
  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3306 \f
3307 /* Convert a normal process ID to a string. Returns the string in a
3308 static buffer. */
3309
char *
normal_pid_to_str (ptid_t ptid)
{
  /* NOTE: static storage -- the result is overwritten by the next
     call; callers must copy it if they need it to persist.  */
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3318
3319 static char *
3320 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3321 {
3322 return normal_pid_to_str (ptid);
3323 }
3324
/* Error-catcher for target_find_memory_regions.  Installed in the
   dummy target; always reports the operation as unsupported.  The
   trailing return keeps the signature's contract explicit even
   though error () is not expected to return.  */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  return 0;
}
3333
/* Error-catcher for target_make_corefile_notes.  Installed in the
   dummy target; always reports the operation as unsupported.  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  return NULL;
}
3342
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  /* These must work even with no target pushed -- presumably so that
     "run" can pick a real target; confirm against
     find_default_create_inferior.  */
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_stratum = dummy_stratum;
  /* The dummy target has nothing: no memory, stack, registers, nor a
     running inferior.  */
  dummy_target.to_has_all_memory = return_zero;
  dummy_target.to_has_memory = return_zero;
  dummy_target.to_has_stack = return_zero;
  dummy_target.to_has_registers = return_zero;
  dummy_target.to_has_execution = return_zero_has_execution;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining slot with a default method.  */
  install_dummy_methods (&dummy_target);
}
3366 \f
3367 static void
3368 debug_to_open (char *args, int from_tty)
3369 {
3370 debug_target.to_open (args, from_tty);
3371
3372 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3373 }
3374
void
target_close (struct target_ops *targ)
{
  /* Closing a target that is still on the stack would leave dangling
     references; callers must unpush it first.  */
  gdb_assert (!target_is_pushed (targ));

  /* Prefer to_xclose when both hooks are provided.  NOTE(review): the
     "x" variant presumably also frees TARG itself -- confirm against
     target.h before touching TARG afterwards.  */
  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3388
3389 void
3390 target_attach (char *args, int from_tty)
3391 {
3392 current_target.to_attach (&current_target, args, from_tty);
3393 if (targetdebug)
3394 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3395 args, from_tty);
3396 }
3397
3398 int
3399 target_thread_alive (ptid_t ptid)
3400 {
3401 int retval;
3402
3403 retval = current_target.to_thread_alive (&current_target, ptid);
3404 if (targetdebug)
3405 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3406 ptid_get_pid (ptid), retval);
3407
3408 return retval;
3409 }
3410
3411 void
3412 target_find_new_threads (void)
3413 {
3414 current_target.to_find_new_threads (&current_target);
3415 if (targetdebug)
3416 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3417 }
3418
3419 void
3420 target_stop (ptid_t ptid)
3421 {
3422 if (!may_stop)
3423 {
3424 warning (_("May not interrupt or stop the target, ignoring attempt"));
3425 return;
3426 }
3427
3428 (*current_target.to_stop) (&current_target, ptid);
3429 }
3430
3431 static void
3432 debug_to_post_attach (struct target_ops *self, int pid)
3433 {
3434 debug_target.to_post_attach (&debug_target, pid);
3435
3436 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3437 }
3438
3439 /* Concatenate ELEM to LIST, a comma separate list, and return the
3440 result. The LIST incoming argument is released. */
3441
3442 static char *
3443 str_comma_list_concat_elem (char *list, const char *elem)
3444 {
3445 if (list == NULL)
3446 return xstrdup (elem);
3447 else
3448 return reconcat (list, list, ", ", elem, (char *) NULL);
3449 }
3450
3451 /* Helper for target_options_to_string. If OPT is present in
3452 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3453 Returns the new resulting string. OPT is removed from
3454 TARGET_OPTIONS. */
3455
static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  int present = (*target_options & opt) != 0;

  if (present)
    {
      /* Consume the bit and append its string form to RET.  */
      *target_options &= ~opt;
      ret = str_comma_list_concat_elem (ret, opt_str);
    }

  return ret;
}
3468
char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

  /* Append #OPT to RET and clear the corresponding bit from
     TARGET_OPTIONS when it is set.  */
#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  /* Any bits still set have no symbolic name we know about.  */
  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  /* Callers always get a malloc'd string, even when no options were
     set.  */
  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3486
/* Log FUNC plus the name, raw bytes and (when it fits) integer value
   of register REGNO from REGCACHE to gdb_stdlog.  */

static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Print the register's name when REGNO maps to a named raw
     register; otherwise fall back to the raw number.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      /* Dump the raw register contents as hex bytes...  */
      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* ... and, when the value fits in a LONGEST, also as an address
	 and as a decimal integer.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3523
3524 void
3525 target_fetch_registers (struct regcache *regcache, int regno)
3526 {
3527 current_target.to_fetch_registers (&current_target, regcache, regno);
3528 if (targetdebug)
3529 debug_print_register ("target_fetch_registers", regcache, regno);
3530 }
3531
3532 void
3533 target_store_registers (struct regcache *regcache, int regno)
3534 {
3535 struct target_ops *t;
3536
3537 if (!may_write_registers)
3538 error (_("Writing to registers is not allowed (regno %d)"), regno);
3539
3540 current_target.to_store_registers (&current_target, regcache, regno);
3541 if (targetdebug)
3542 {
3543 debug_print_register ("target_store_registers", regcache, regno);
3544 }
3545 }
3546
3547 int
3548 target_core_of_thread (ptid_t ptid)
3549 {
3550 int retval = current_target.to_core_of_thread (&current_target, ptid);
3551
3552 if (targetdebug)
3553 fprintf_unfiltered (gdb_stdlog,
3554 "target_core_of_thread (%d) = %d\n",
3555 ptid_get_pid (ptid), retval);
3556 return retval;
3557 }
3558
3559 int
3560 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3561 {
3562 int retval = current_target.to_verify_memory (&current_target,
3563 data, memaddr, size);
3564
3565 if (targetdebug)
3566 fprintf_unfiltered (gdb_stdlog,
3567 "target_verify_memory (%s, %s) = %d\n",
3568 paddress (target_gdbarch (), memaddr),
3569 pulongest (size),
3570 retval);
3571 return retval;
3572 }
3573
3574 /* The documentation for this function is in its prototype declaration in
3575 target.h. */
3576
3577 int
3578 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3579 {
3580 int ret;
3581
3582 ret = current_target.to_insert_mask_watchpoint (&current_target,
3583 addr, mask, rw);
3584
3585 if (targetdebug)
3586 fprintf_unfiltered (gdb_stdlog, "\
3587 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3588 core_addr_to_string (addr),
3589 core_addr_to_string (mask), rw, ret);
3590
3591 return ret;
3592 }
3593
3594 /* The documentation for this function is in its prototype declaration in
3595 target.h. */
3596
3597 int
3598 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3599 {
3600 int ret;
3601
3602 ret = current_target.to_remove_mask_watchpoint (&current_target,
3603 addr, mask, rw);
3604
3605 if (targetdebug)
3606 fprintf_unfiltered (gdb_stdlog, "\
3607 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
3608 core_addr_to_string (addr),
3609 core_addr_to_string (mask), rw, ret);
3610
3611 return ret;
3612 }
3613
3614 /* The documentation for this function is in its prototype declaration
3615 in target.h. */
3616
3617 int
3618 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
3619 {
3620 return current_target.to_masked_watch_num_registers (&current_target,
3621 addr, mask);
3622 }
3623
3624 /* The documentation for this function is in its prototype declaration
3625 in target.h. */
3626
3627 int
3628 target_ranged_break_num_registers (void)
3629 {
3630 return current_target.to_ranged_break_num_registers (&current_target);
3631 }
3632
3633 /* See target.h. */
3634
3635 struct btrace_target_info *
3636 target_enable_btrace (ptid_t ptid)
3637 {
3638 return current_target.to_enable_btrace (&current_target, ptid);
3639 }
3640
3641 /* See target.h. */
3642
3643 void
3644 target_disable_btrace (struct btrace_target_info *btinfo)
3645 {
3646 current_target.to_disable_btrace (&current_target, btinfo);
3647 }
3648
3649 /* See target.h. */
3650
3651 void
3652 target_teardown_btrace (struct btrace_target_info *btinfo)
3653 {
3654 current_target.to_teardown_btrace (&current_target, btinfo);
3655 }
3656
3657 /* See target.h. */
3658
3659 enum btrace_error
3660 target_read_btrace (VEC (btrace_block_s) **btrace,
3661 struct btrace_target_info *btinfo,
3662 enum btrace_read_type type)
3663 {
3664 return current_target.to_read_btrace (&current_target, btrace, btinfo, type);
3665 }
3666
3667 /* See target.h. */
3668
3669 void
3670 target_stop_recording (void)
3671 {
3672 current_target.to_stop_recording (&current_target);
3673 }
3674
3675 /* See target.h. */
3676
3677 void
3678 target_info_record (void)
3679 {
3680 struct target_ops *t;
3681
3682 for (t = current_target.beneath; t != NULL; t = t->beneath)
3683 if (t->to_info_record != NULL)
3684 {
3685 t->to_info_record (t);
3686 return;
3687 }
3688
3689 tcomplain ();
3690 }
3691
3692 /* See target.h. */
3693
3694 void
3695 target_save_record (const char *filename)
3696 {
3697 current_target.to_save_record (&current_target, filename);
3698 }
3699
3700 /* See target.h. */
3701
3702 int
3703 target_supports_delete_record (void)
3704 {
3705 struct target_ops *t;
3706
3707 for (t = current_target.beneath; t != NULL; t = t->beneath)
3708 if (t->to_delete_record != NULL)
3709 return 1;
3710
3711 return 0;
3712 }
3713
3714 /* See target.h. */
3715
3716 void
3717 target_delete_record (void)
3718 {
3719 current_target.to_delete_record (&current_target);
3720 }
3721
3722 /* See target.h. */
3723
3724 int
3725 target_record_is_replaying (void)
3726 {
3727 return current_target.to_record_is_replaying (&current_target);
3728 }
3729
3730 /* See target.h. */
3731
3732 void
3733 target_goto_record_begin (void)
3734 {
3735 current_target.to_goto_record_begin (&current_target);
3736 }
3737
3738 /* See target.h. */
3739
3740 void
3741 target_goto_record_end (void)
3742 {
3743 current_target.to_goto_record_end (&current_target);
3744 }
3745
3746 /* See target.h. */
3747
3748 void
3749 target_goto_record (ULONGEST insn)
3750 {
3751 current_target.to_goto_record (&current_target, insn);
3752 }
3753
3754 /* See target.h. */
3755
3756 void
3757 target_insn_history (int size, int flags)
3758 {
3759 current_target.to_insn_history (&current_target, size, flags);
3760 }
3761
3762 /* See target.h. */
3763
3764 void
3765 target_insn_history_from (ULONGEST from, int size, int flags)
3766 {
3767 current_target.to_insn_history_from (&current_target, from, size, flags);
3768 }
3769
3770 /* See target.h. */
3771
3772 void
3773 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
3774 {
3775 current_target.to_insn_history_range (&current_target, begin, end, flags);
3776 }
3777
3778 /* See target.h. */
3779
3780 void
3781 target_call_history (int size, int flags)
3782 {
3783 current_target.to_call_history (&current_target, size, flags);
3784 }
3785
3786 /* See target.h. */
3787
3788 void
3789 target_call_history_from (ULONGEST begin, int size, int flags)
3790 {
3791 current_target.to_call_history_from (&current_target, begin, size, flags);
3792 }
3793
3794 /* See target.h. */
3795
3796 void
3797 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
3798 {
3799 current_target.to_call_history_range (&current_target, begin, end, flags);
3800 }
3801
3802 static void
3803 debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
3804 {
3805 debug_target.to_prepare_to_store (&debug_target, regcache);
3806
3807 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
3808 }
3809
3810 /* See target.h. */
3811
3812 const struct frame_unwind *
3813 target_get_unwinder (void)
3814 {
3815 return current_target.to_get_unwinder (&current_target);
3816 }
3817
3818 /* See target.h. */
3819
3820 const struct frame_unwind *
3821 target_get_tailcall_unwinder (void)
3822 {
3823 return current_target.to_get_tailcall_unwinder (&current_target);
3824 }
3825
3826 /* Default implementation of to_decr_pc_after_break. */
3827
3828 static CORE_ADDR
3829 default_target_decr_pc_after_break (struct target_ops *ops,
3830 struct gdbarch *gdbarch)
3831 {
3832 return gdbarch_decr_pc_after_break (gdbarch);
3833 }
3834
3835 /* See target.h. */
3836
3837 CORE_ADDR
3838 target_decr_pc_after_break (struct gdbarch *gdbarch)
3839 {
3840 return current_target.to_decr_pc_after_break (&current_target, gdbarch);
3841 }
3842
/* Debug wrapper for the deprecated_xfer_memory hook: forward the
   transfer, then log the request plus a (possibly truncated) hex dump
   of the bytes involved.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Start a fresh output line whenever the host address of
	     the current byte is 16-byte aligned.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* At verbosity below 2, stop after the first chunk.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
3883
3884 static void
3885 debug_to_files_info (struct target_ops *target)
3886 {
3887 debug_target.to_files_info (target);
3888
3889 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
3890 }
3891
3892 static int
3893 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
3894 struct bp_target_info *bp_tgt)
3895 {
3896 int retval;
3897
3898 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
3899
3900 fprintf_unfiltered (gdb_stdlog,
3901 "target_insert_breakpoint (%s, xxx) = %ld\n",
3902 core_addr_to_string (bp_tgt->placed_address),
3903 (unsigned long) retval);
3904 return retval;
3905 }
3906
3907 static int
3908 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
3909 struct bp_target_info *bp_tgt)
3910 {
3911 int retval;
3912
3913 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
3914
3915 fprintf_unfiltered (gdb_stdlog,
3916 "target_remove_breakpoint (%s, xxx) = %ld\n",
3917 core_addr_to_string (bp_tgt->placed_address),
3918 (unsigned long) retval);
3919 return retval;
3920 }
3921
3922 static int
3923 debug_to_can_use_hw_breakpoint (struct target_ops *self,
3924 int type, int cnt, int from_tty)
3925 {
3926 int retval;
3927
3928 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
3929 type, cnt, from_tty);
3930
3931 fprintf_unfiltered (gdb_stdlog,
3932 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
3933 (unsigned long) type,
3934 (unsigned long) cnt,
3935 (unsigned long) from_tty,
3936 (unsigned long) retval);
3937 return retval;
3938 }
3939
3940 static int
3941 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
3942 CORE_ADDR addr, int len)
3943 {
3944 CORE_ADDR retval;
3945
3946 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
3947 addr, len);
3948
3949 fprintf_unfiltered (gdb_stdlog,
3950 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
3951 core_addr_to_string (addr), (unsigned long) len,
3952 core_addr_to_string (retval));
3953 return retval;
3954 }
3955
3956 static int
3957 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
3958 CORE_ADDR addr, int len, int rw,
3959 struct expression *cond)
3960 {
3961 int retval;
3962
3963 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
3964 addr, len,
3965 rw, cond);
3966
3967 fprintf_unfiltered (gdb_stdlog,
3968 "target_can_accel_watchpoint_condition "
3969 "(%s, %d, %d, %s) = %ld\n",
3970 core_addr_to_string (addr), len, rw,
3971 host_address_to_string (cond), (unsigned long) retval);
3972 return retval;
3973 }
3974
3975 static int
3976 debug_to_stopped_by_watchpoint (struct target_ops *ops)
3977 {
3978 int retval;
3979
3980 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
3981
3982 fprintf_unfiltered (gdb_stdlog,
3983 "target_stopped_by_watchpoint () = %ld\n",
3984 (unsigned long) retval);
3985 return retval;
3986 }
3987
3988 static int
3989 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
3990 {
3991 int retval;
3992
3993 retval = debug_target.to_stopped_data_address (target, addr);
3994
3995 fprintf_unfiltered (gdb_stdlog,
3996 "target_stopped_data_address ([%s]) = %ld\n",
3997 core_addr_to_string (*addr),
3998 (unsigned long)retval);
3999 return retval;
4000 }
4001
4002 static int
4003 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4004 CORE_ADDR addr,
4005 CORE_ADDR start, int length)
4006 {
4007 int retval;
4008
4009 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4010 start, length);
4011
4012 fprintf_filtered (gdb_stdlog,
4013 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4014 core_addr_to_string (addr), core_addr_to_string (start),
4015 length, retval);
4016 return retval;
4017 }
4018
4019 static int
4020 debug_to_insert_hw_breakpoint (struct target_ops *self,
4021 struct gdbarch *gdbarch,
4022 struct bp_target_info *bp_tgt)
4023 {
4024 int retval;
4025
4026 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4027 gdbarch, bp_tgt);
4028
4029 fprintf_unfiltered (gdb_stdlog,
4030 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4031 core_addr_to_string (bp_tgt->placed_address),
4032 (unsigned long) retval);
4033 return retval;
4034 }
4035
4036 static int
4037 debug_to_remove_hw_breakpoint (struct target_ops *self,
4038 struct gdbarch *gdbarch,
4039 struct bp_target_info *bp_tgt)
4040 {
4041 int retval;
4042
4043 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4044 gdbarch, bp_tgt);
4045
4046 fprintf_unfiltered (gdb_stdlog,
4047 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4048 core_addr_to_string (bp_tgt->placed_address),
4049 (unsigned long) retval);
4050 return retval;
4051 }
4052
4053 static int
4054 debug_to_insert_watchpoint (struct target_ops *self,
4055 CORE_ADDR addr, int len, int type,
4056 struct expression *cond)
4057 {
4058 int retval;
4059
4060 retval = debug_target.to_insert_watchpoint (&debug_target,
4061 addr, len, type, cond);
4062
4063 fprintf_unfiltered (gdb_stdlog,
4064 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4065 core_addr_to_string (addr), len, type,
4066 host_address_to_string (cond), (unsigned long) retval);
4067 return retval;
4068 }
4069
4070 static int
4071 debug_to_remove_watchpoint (struct target_ops *self,
4072 CORE_ADDR addr, int len, int type,
4073 struct expression *cond)
4074 {
4075 int retval;
4076
4077 retval = debug_target.to_remove_watchpoint (&debug_target,
4078 addr, len, type, cond);
4079
4080 fprintf_unfiltered (gdb_stdlog,
4081 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4082 core_addr_to_string (addr), len, type,
4083 host_address_to_string (cond), (unsigned long) retval);
4084 return retval;
4085 }
4086
4087 static void
4088 debug_to_terminal_init (struct target_ops *self)
4089 {
4090 debug_target.to_terminal_init (&debug_target);
4091
4092 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4093 }
4094
4095 static void
4096 debug_to_terminal_inferior (struct target_ops *self)
4097 {
4098 debug_target.to_terminal_inferior (&debug_target);
4099
4100 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4101 }
4102
4103 static void
4104 debug_to_terminal_ours_for_output (struct target_ops *self)
4105 {
4106 debug_target.to_terminal_ours_for_output (&debug_target);
4107
4108 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4109 }
4110
4111 static void
4112 debug_to_terminal_ours (struct target_ops *self)
4113 {
4114 debug_target.to_terminal_ours (&debug_target);
4115
4116 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4117 }
4118
4119 static void
4120 debug_to_terminal_save_ours (struct target_ops *self)
4121 {
4122 debug_target.to_terminal_save_ours (&debug_target);
4123
4124 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4125 }
4126
4127 static void
4128 debug_to_terminal_info (struct target_ops *self,
4129 const char *arg, int from_tty)
4130 {
4131 debug_target.to_terminal_info (&debug_target, arg, from_tty);
4132
4133 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4134 from_tty);
4135 }
4136
4137 static void
4138 debug_to_load (struct target_ops *self, char *args, int from_tty)
4139 {
4140 debug_target.to_load (&debug_target, args, from_tty);
4141
4142 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4143 }
4144
4145 static void
4146 debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4147 {
4148 debug_target.to_post_startup_inferior (&debug_target, ptid);
4149
4150 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4151 ptid_get_pid (ptid));
4152 }
4153
4154 static int
4155 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4156 {
4157 int retval;
4158
4159 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4160
4161 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4162 pid, retval);
4163
4164 return retval;
4165 }
4166
4167 static int
4168 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4169 {
4170 int retval;
4171
4172 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4173
4174 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4175 pid, retval);
4176
4177 return retval;
4178 }
4179
4180 static int
4181 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4182 {
4183 int retval;
4184
4185 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4186
4187 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4188 pid, retval);
4189
4190 return retval;
4191 }
4192
4193 static int
4194 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4195 {
4196 int retval;
4197
4198 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4199
4200 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4201 pid, retval);
4202
4203 return retval;
4204 }
4205
4206 static int
4207 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4208 {
4209 int retval;
4210
4211 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4212
4213 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4214 pid, retval);
4215
4216 return retval;
4217 }
4218
4219 static int
4220 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4221 {
4222 int retval;
4223
4224 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4225
4226 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4227 pid, retval);
4228
4229 return retval;
4230 }
4231
4232 static int
4233 debug_to_has_exited (struct target_ops *self,
4234 int pid, int wait_status, int *exit_status)
4235 {
4236 int has_exited;
4237
4238 has_exited = debug_target.to_has_exited (&debug_target,
4239 pid, wait_status, exit_status);
4240
4241 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4242 pid, wait_status, *exit_status, has_exited);
4243
4244 return has_exited;
4245 }
4246
4247 static int
4248 debug_to_can_run (struct target_ops *self)
4249 {
4250 int retval;
4251
4252 retval = debug_target.to_can_run (&debug_target);
4253
4254 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4255
4256 return retval;
4257 }
4258
4259 static struct gdbarch *
4260 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4261 {
4262 struct gdbarch *retval;
4263
4264 retval = debug_target.to_thread_architecture (ops, ptid);
4265
4266 fprintf_unfiltered (gdb_stdlog,
4267 "target_thread_architecture (%s) = %s [%s]\n",
4268 target_pid_to_str (ptid),
4269 host_address_to_string (retval),
4270 gdbarch_bfd_arch_info (retval)->printable_name);
4271 return retval;
4272 }
4273
4274 static void
4275 debug_to_stop (struct target_ops *self, ptid_t ptid)
4276 {
4277 debug_target.to_stop (&debug_target, ptid);
4278
4279 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4280 target_pid_to_str (ptid));
4281 }
4282
4283 static void
4284 debug_to_rcmd (struct target_ops *self, char *command,
4285 struct ui_file *outbuf)
4286 {
4287 debug_target.to_rcmd (&debug_target, command, outbuf);
4288 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4289 }
4290
4291 static char *
4292 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4293 {
4294 char *exec_file;
4295
4296 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4297
4298 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4299 pid, exec_file);
4300
4301 return exec_file;
4302 }
4303
/* Install the debug_to_* wrappers above into current_target, first
   saving the original vector in debug_target so that each wrapper can
   forward the call and then log it.  */

static void
setup_target_debug (void)
{
  /* Snapshot the real vector; the wrappers delegate through it.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4351 \f
4352
/* Help text describing the target stack; presumably passed to a
   command registration call further down the file -- confirm.  */
static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4357
/* Default to_rcmd method: no monitor interface is available, so any
   "monitor" command is rejected via error ().  */
static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4363
/* Implement the "monitor" CLI command: pass CMD verbatim to the
   target, sending any response to gdb's target-output stream.  */
static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
4370
4371 /* Print the name of each layers of our target stack. */
4372
4373 static void
4374 maintenance_print_target_stack (char *cmd, int from_tty)
4375 {
4376 struct target_ops *t;
4377
4378 printf_filtered (_("The current target stack is:\n"));
4379
4380 for (t = target_stack; t != NULL; t = t->beneath)
4381 {
4382 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4383 }
4384 }
4385
/* Controls if async mode is permitted.  This is the committed,
   in-effect value.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated.  */
static int target_async_permitted_1 = 0;
4392
static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  /* The "set" machinery already wrote the user's choice into
     target_async_permitted_1; refuse to commit it while the inferior
     is live, rolling the staging variable back so "show" stays
     accurate.  */
  if (have_live_inferiors ())
    {
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
4405
/* "show target-async" callback: print the current VALUE.  */
static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4415
/* Temporary copies of permission settings.  The "set may-*" commands
   write to these; the live flags (may_write_registers etc., declared
   elsewhere) are only updated from them when no inferior is
   executing.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4424
4425 /* Make the user-set values match the real values again. */
4426
4427 void
4428 update_target_permissions (void)
4429 {
4430 may_write_registers_1 = may_write_registers;
4431 may_write_memory_1 = may_write_memory;
4432 may_insert_breakpoints_1 = may_insert_breakpoints;
4433 may_insert_tracepoints_1 = may_insert_tracepoints;
4434 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4435 may_stop_1 = may_stop;
4436 }
4437
4438 /* The one function handles (most of) the permission flags in the same
4439 way. */
4440
4441 static void
4442 set_target_permissions (char *args, int from_tty,
4443 struct cmd_list_element *c)
4444 {
4445 if (target_has_execution)
4446 {
4447 update_target_permissions ();
4448 error (_("Cannot change this setting while the inferior is running."));
4449 }
4450
4451 /* Make the real values match the user-changed values. */
4452 may_write_registers = may_write_registers_1;
4453 may_insert_breakpoints = may_insert_breakpoints_1;
4454 may_insert_tracepoints = may_insert_tracepoints_1;
4455 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4456 may_stop = may_stop_1;
4457 update_observer_mode ();
4458 }
4459
/* Set memory write permission independently of observer mode.
   Unlike the flags handled by set_target_permissions, this one is not
   guarded by target_has_execution, so it may be changed at any
   time.  */

static void
set_write_memory_permission (char *args, int from_tty,
			struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
4470

/* Register the target-related commands and settings, and install the
   dummy target as the permanent bottom of the target stack.  Called
   once at GDB startup.  */

void
initialize_targets (void)
{
  /* The dummy target is always present at the bottom of the stack.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are aliases of each other.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  /* The "set" command writes into the *_1 staging variable; the hook
     decides whether to commit it (see set_target_async_command).  */
  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* Permission flags.  All but may-write-memory share the
     set_target_permissions hook; memory writes have their own hook so
     they can be toggled while the inferior runs.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}