[binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
4 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 Free Software Foundation, Inc.
6
7 Contributed by Cygnus Support.
8
9 This file is part of GDB.
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3 of the License, or
14 (at your option) any later version.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23
24 #include "defs.h"
25 #include <errno.h>
26 #include "gdb_string.h"
27 #include "target.h"
28 #include "gdbcmd.h"
29 #include "symtab.h"
30 #include "inferior.h"
31 #include "bfd.h"
32 #include "symfile.h"
33 #include "objfiles.h"
34 #include "gdb_wait.h"
35 #include "dcache.h"
36 #include <signal.h>
37 #include "regcache.h"
38 #include "gdb_assert.h"
39 #include "gdbcore.h"
40 #include "exceptions.h"
41 #include "target-descriptions.h"
42 #include "gdbthread.h"
43 #include "solib.h"
44
45 static void target_info (char *, int);
46
47 static void kill_or_be_killed (int);
48
49 static void default_terminal_info (char *, int);
50
51 static int default_watchpoint_addr_within_range (struct target_ops *,
52 CORE_ADDR, CORE_ADDR, int);
53
54 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
55
56 static int nosymbol (char *, CORE_ADDR *);
57
58 static void tcomplain (void) ATTR_NORETURN;
59
60 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
61
62 static int return_zero (void);
63
64 static int return_one (void);
65
66 static int return_minus_one (void);
67
68 void target_ignore (void);
69
70 static void target_command (char *, int);
71
72 static struct target_ops *find_default_run_target (char *);
73
74 static void nosupport_runtime (void);
75
76 static LONGEST default_xfer_partial (struct target_ops *ops,
77 enum target_object object,
78 const char *annex, gdb_byte *readbuf,
79 const gdb_byte *writebuf,
80 ULONGEST offset, LONGEST len);
81
82 static LONGEST current_xfer_partial (struct target_ops *ops,
83 enum target_object object,
84 const char *annex, gdb_byte *readbuf,
85 const gdb_byte *writebuf,
86 ULONGEST offset, LONGEST len);
87
88 static LONGEST target_xfer_partial (struct target_ops *ops,
89 enum target_object object,
90 const char *annex,
91 void *readbuf, const void *writebuf,
92 ULONGEST offset, LONGEST len);
93
94 static void init_dummy_target (void);
95
96 static struct target_ops debug_target;
97
98 static void debug_to_open (char *, int);
99
100 static void debug_to_close (int);
101
102 static void debug_to_attach (char *, int);
103
104 static void debug_to_detach (char *, int);
105
106 static void debug_to_resume (ptid_t, int, enum target_signal);
107
108 static ptid_t debug_to_wait (ptid_t, struct target_waitstatus *);
109
110 static void debug_to_fetch_registers (struct regcache *, int);
111
112 static void debug_to_store_registers (struct regcache *, int);
113
114 static void debug_to_prepare_to_store (struct regcache *);
115
116 static void debug_to_files_info (struct target_ops *);
117
118 static int debug_to_insert_breakpoint (struct bp_target_info *);
119
120 static int debug_to_remove_breakpoint (struct bp_target_info *);
121
122 static int debug_to_can_use_hw_breakpoint (int, int, int);
123
124 static int debug_to_insert_hw_breakpoint (struct bp_target_info *);
125
126 static int debug_to_remove_hw_breakpoint (struct bp_target_info *);
127
128 static int debug_to_insert_watchpoint (CORE_ADDR, int, int);
129
130 static int debug_to_remove_watchpoint (CORE_ADDR, int, int);
131
132 static int debug_to_stopped_by_watchpoint (void);
133
134 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
135
136 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
137 CORE_ADDR, CORE_ADDR, int);
138
139 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
140
141 static void debug_to_terminal_init (void);
142
143 static void debug_to_terminal_inferior (void);
144
145 static void debug_to_terminal_ours_for_output (void);
146
147 static void debug_to_terminal_save_ours (void);
148
149 static void debug_to_terminal_ours (void);
150
151 static void debug_to_terminal_info (char *, int);
152
153 static void debug_to_kill (void);
154
155 static void debug_to_load (char *, int);
156
157 static int debug_to_lookup_symbol (char *, CORE_ADDR *);
158
159 static void debug_to_mourn_inferior (void);
160
161 static int debug_to_can_run (void);
162
163 static void debug_to_notice_signals (ptid_t);
164
165 static int debug_to_thread_alive (ptid_t);
166
167 static void debug_to_stop (ptid_t);
168
169 /* NOTE: cagney/2004-09-29: Many targets reference this variable in
170 weird and mysterious ways. Putting the variable here lets those
171 weird and mysterious ways keep building while they are being
172 converted to the inferior inheritance structure. */
173 struct target_ops deprecated_child_ops;
174
175 /* Pointer to array of target architecture structures; the size of the
176 array; the current index into the array; the allocated size of the
177 array. */
178 struct target_ops **target_structs;
179 unsigned target_struct_size;
180 unsigned target_struct_index;
181 unsigned target_struct_allocsize;
182 #define DEFAULT_ALLOCSIZE 10
183
184 /* The initial current target, so that there is always a semi-valid
185 current target. */
186
187 static struct target_ops dummy_target;
188
189 /* Top of target stack. */
190
191 static struct target_ops *target_stack;
192
193 /* The target structure we are currently using to talk to a process
194 or file or whatever "inferior" we have. */
195
196 struct target_ops current_target;
197
198 /* Command list for target. */
199
200 static struct cmd_list_element *targetlist = NULL;
201
202 /* Nonzero if we should trust readonly sections from the
203 executable when reading memory. */
204
205 static int trust_readonly = 0;
206
207 /* Nonzero if we should show true memory content including
208 memory breakpoints inserted by GDB. */
209
210 static int show_memory_breakpoints = 0;
211
212 /* Non-zero if we want to see trace of target level stuff. */
213
214 static int targetdebug = 0;
215 static void
216 show_targetdebug (struct ui_file *file, int from_tty,
217 struct cmd_list_element *c, const char *value)
218 {
219 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
220 }
221
222 static void setup_target_debug (void);
223
224 DCACHE *target_dcache;
225
226 /* The user just typed 'target' without the name of a target. */
227
228 static void
229 target_command (char *arg, int from_tty)
230 {
231 fputs_filtered ("Argument required (target name). Try `help target'\n",
232 gdb_stdout);
233 }
234
235 /* Add a possible target architecture to the list. */
236
237 void
238 add_target (struct target_ops *t)
239 {
240 /* Provide default values for all "must have" methods. */
241 if (t->to_xfer_partial == NULL)
242 t->to_xfer_partial = default_xfer_partial;
243
244 if (!target_structs)
245 {
246 target_struct_allocsize = DEFAULT_ALLOCSIZE;
247 target_structs = (struct target_ops **) xmalloc
248 (target_struct_allocsize * sizeof (*target_structs));
249 }
250 if (target_struct_size >= target_struct_allocsize)
251 {
252 target_struct_allocsize *= 2;
253 target_structs = (struct target_ops **)
254 xrealloc ((char *) target_structs,
255 target_struct_allocsize * sizeof (*target_structs));
256 }
257 target_structs[target_struct_size++] = t;
258
259 if (targetlist == NULL)
260 add_prefix_cmd ("target", class_run, target_command, _("\
261 Connect to a target machine or process.\n\
262 The first argument is the type or protocol of the target machine.\n\
263 Remaining arguments are interpreted by the target protocol. For more\n\
264 information on the arguments for a particular protocol, type\n\
265 `help target ' followed by the protocol name."),
266 &targetlist, "target ", 0, &cmdlist);
267 add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
268 }
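/* Illustrative sketch, not part of GDB: all "demo_*" names below are
   assumptions.  A target backend typically defines a target_ops
   vector, fills in at least the identification fields and to_open,
   and registers it with add_target from its _initialize_* routine;
   the open routine then pushes the vector onto the target stack.

     static struct target_ops demo_ops;

     static void
     demo_open (char *args, int from_tty)
     {
       push_target (&demo_ops);
     }

     static void
     init_demo_ops (void)
     {
       demo_ops.to_shortname = "demo";
       demo_ops.to_longname = "Demonstration target";
       demo_ops.to_doc = "A purely illustrative target backend.";
       demo_ops.to_open = demo_open;
       demo_ops.to_stratum = process_stratum;
       demo_ops.to_magic = OPS_MAGIC;
       add_target (&demo_ops);
     }
*/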
269
270 /* Stub functions */
271
272 void
273 target_ignore (void)
274 {
275 }
276
277 void
278 target_load (char *arg, int from_tty)
279 {
280 dcache_invalidate (target_dcache);
281 (*current_target.to_load) (arg, from_tty);
282 }
283
284 static int
285 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
286 struct target_ops *t)
287 {
288 errno = EIO; /* Can't read/write this location */
289 return 0; /* No bytes handled */
290 }
291
292 static void
293 tcomplain (void)
294 {
295 error (_("You can't do that when your target is `%s'"),
296 current_target.to_shortname);
297 }
298
299 void
300 noprocess (void)
301 {
302 error (_("You can't do that without a process to debug."));
303 }
304
305 static int
306 nosymbol (char *name, CORE_ADDR *addrp)
307 {
308 return 1; /* Symbol does not exist in target env */
309 }
310
311 static void
312 nosupport_runtime (void)
313 {
314 if (ptid_equal (inferior_ptid, null_ptid))
315 noprocess ();
316 else
317 error (_("No run-time support for this"));
318 }
319
320
321 static void
322 default_terminal_info (char *args, int from_tty)
323 {
324 printf_unfiltered (_("No saved terminal information.\n"));
325 }
326
327 /* This is the default target_create_inferior and target_attach function.
328 If the current target is executing, it asks whether to kill it off.
329 If this function returns without calling error(), it has killed off
330 the target, and the operation should be attempted. */
331
332 static void
333 kill_or_be_killed (int from_tty)
334 {
335 if (target_has_execution)
336 {
337 printf_unfiltered (_("You are already running a program:\n"));
338 target_files_info ();
339 if (query ("Kill it? "))
340 {
341 target_kill ();
342 if (target_has_execution)
343 error (_("Killing the program did not help."));
344 return;
345 }
346 else
347 {
348 error (_("Program not killed."));
349 }
350 }
351 tcomplain ();
352 }
353
354 /* Go through the target stack from top to bottom, copying over zero
355 entries in current_target, then filling in still empty entries. In
356 effect, we are doing class inheritance through the pushed target
357 vectors.
358
359 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
360 is currently implemented, is that it discards any knowledge of
361 which target an inherited method originally belonged to.
362 Consequently, new target methods should instead explicitly and
363 locally search the target stack for the target that can handle the
364 request. */
365
366 static void
367 update_current_target (void)
368 {
369 struct target_ops *t;
370
371 /* First, reset current's contents. */
372 memset (&current_target, 0, sizeof (current_target));
373
374 #define INHERIT(FIELD, TARGET) \
375 if (!current_target.FIELD) \
376 current_target.FIELD = (TARGET)->FIELD
377
378 for (t = target_stack; t; t = t->beneath)
379 {
380 INHERIT (to_shortname, t);
381 INHERIT (to_longname, t);
382 INHERIT (to_doc, t);
383 /* Do not inherit to_open. */
384 /* Do not inherit to_close. */
385 INHERIT (to_attach, t);
386 INHERIT (to_post_attach, t);
387 INHERIT (to_attach_no_wait, t);
388 INHERIT (to_detach, t);
389 /* Do not inherit to_disconnect. */
390 INHERIT (to_resume, t);
391 INHERIT (to_wait, t);
392 INHERIT (to_fetch_registers, t);
393 INHERIT (to_store_registers, t);
394 INHERIT (to_prepare_to_store, t);
395 INHERIT (deprecated_xfer_memory, t);
396 INHERIT (to_files_info, t);
397 INHERIT (to_insert_breakpoint, t);
398 INHERIT (to_remove_breakpoint, t);
399 INHERIT (to_can_use_hw_breakpoint, t);
400 INHERIT (to_insert_hw_breakpoint, t);
401 INHERIT (to_remove_hw_breakpoint, t);
402 INHERIT (to_insert_watchpoint, t);
403 INHERIT (to_remove_watchpoint, t);
404 INHERIT (to_stopped_data_address, t);
405 INHERIT (to_have_steppable_watchpoint, t);
406 INHERIT (to_have_continuable_watchpoint, t);
407 INHERIT (to_stopped_by_watchpoint, t);
408 INHERIT (to_watchpoint_addr_within_range, t);
409 INHERIT (to_region_ok_for_hw_watchpoint, t);
410 INHERIT (to_terminal_init, t);
411 INHERIT (to_terminal_inferior, t);
412 INHERIT (to_terminal_ours_for_output, t);
413 INHERIT (to_terminal_ours, t);
414 INHERIT (to_terminal_save_ours, t);
415 INHERIT (to_terminal_info, t);
416 INHERIT (to_kill, t);
417 INHERIT (to_load, t);
418 INHERIT (to_lookup_symbol, t);
419 INHERIT (to_create_inferior, t);
420 INHERIT (to_post_startup_inferior, t);
421 INHERIT (to_acknowledge_created_inferior, t);
422 INHERIT (to_insert_fork_catchpoint, t);
423 INHERIT (to_remove_fork_catchpoint, t);
424 INHERIT (to_insert_vfork_catchpoint, t);
425 INHERIT (to_remove_vfork_catchpoint, t);
426 /* Do not inherit to_follow_fork. */
427 INHERIT (to_insert_exec_catchpoint, t);
428 INHERIT (to_remove_exec_catchpoint, t);
429 INHERIT (to_has_exited, t);
430 INHERIT (to_mourn_inferior, t);
431 INHERIT (to_can_run, t);
432 INHERIT (to_notice_signals, t);
433 INHERIT (to_thread_alive, t);
434 INHERIT (to_find_new_threads, t);
435 INHERIT (to_pid_to_str, t);
436 INHERIT (to_extra_thread_info, t);
437 INHERIT (to_stop, t);
438 /* Do not inherit to_xfer_partial. */
439 INHERIT (to_rcmd, t);
440 INHERIT (to_pid_to_exec_file, t);
441 INHERIT (to_log_command, t);
442 INHERIT (to_stratum, t);
443 INHERIT (to_has_all_memory, t);
444 INHERIT (to_has_memory, t);
445 INHERIT (to_has_stack, t);
446 INHERIT (to_has_registers, t);
447 INHERIT (to_has_execution, t);
448 INHERIT (to_has_thread_control, t);
449 INHERIT (to_sections, t);
450 INHERIT (to_sections_end, t);
451 INHERIT (to_can_async_p, t);
452 INHERIT (to_is_async_p, t);
453 INHERIT (to_async, t);
454 INHERIT (to_async_mask, t);
455 INHERIT (to_find_memory_regions, t);
456 INHERIT (to_make_corefile_notes, t);
457 INHERIT (to_get_thread_local_address, t);
458 INHERIT (to_can_execute_reverse, t);
459 /* Do not inherit to_read_description. */
460 /* Do not inherit to_search_memory. */
461 INHERIT (to_magic, t);
462 /* Do not inherit to_memory_map. */
463 /* Do not inherit to_flash_erase. */
464 /* Do not inherit to_flash_done. */
465 }
466 #undef INHERIT
467
468 /* Clean up a target struct so it no longer has any zero pointers in
469 it. Some entries are defaulted to a method that prints an error,
470 others are hard-wired to a standard recursive default. */
471
472 #define de_fault(field, value) \
473 if (!current_target.field) \
474 current_target.field = value
475
476 de_fault (to_open,
477 (void (*) (char *, int))
478 tcomplain);
479 de_fault (to_close,
480 (void (*) (int))
481 target_ignore);
482 de_fault (to_post_attach,
483 (void (*) (int))
484 target_ignore);
485 de_fault (to_detach,
486 (void (*) (char *, int))
487 target_ignore);
488 de_fault (to_resume,
489 (void (*) (ptid_t, int, enum target_signal))
490 noprocess);
491 de_fault (to_wait,
492 (ptid_t (*) (ptid_t, struct target_waitstatus *))
493 noprocess);
494 de_fault (to_fetch_registers,
495 (void (*) (struct regcache *, int))
496 target_ignore);
497 de_fault (to_store_registers,
498 (void (*) (struct regcache *, int))
499 noprocess);
500 de_fault (to_prepare_to_store,
501 (void (*) (struct regcache *))
502 noprocess);
503 de_fault (deprecated_xfer_memory,
504 (int (*) (CORE_ADDR, gdb_byte *, int, int, struct mem_attrib *, struct target_ops *))
505 nomemory);
506 de_fault (to_files_info,
507 (void (*) (struct target_ops *))
508 target_ignore);
509 de_fault (to_insert_breakpoint,
510 memory_insert_breakpoint);
511 de_fault (to_remove_breakpoint,
512 memory_remove_breakpoint);
513 de_fault (to_can_use_hw_breakpoint,
514 (int (*) (int, int, int))
515 return_zero);
516 de_fault (to_insert_hw_breakpoint,
517 (int (*) (struct bp_target_info *))
518 return_minus_one);
519 de_fault (to_remove_hw_breakpoint,
520 (int (*) (struct bp_target_info *))
521 return_minus_one);
522 de_fault (to_insert_watchpoint,
523 (int (*) (CORE_ADDR, int, int))
524 return_minus_one);
525 de_fault (to_remove_watchpoint,
526 (int (*) (CORE_ADDR, int, int))
527 return_minus_one);
528 de_fault (to_stopped_by_watchpoint,
529 (int (*) (void))
530 return_zero);
531 de_fault (to_stopped_data_address,
532 (int (*) (struct target_ops *, CORE_ADDR *))
533 return_zero);
534 de_fault (to_watchpoint_addr_within_range,
535 default_watchpoint_addr_within_range);
536 de_fault (to_region_ok_for_hw_watchpoint,
537 default_region_ok_for_hw_watchpoint);
538 de_fault (to_terminal_init,
539 (void (*) (void))
540 target_ignore);
541 de_fault (to_terminal_inferior,
542 (void (*) (void))
543 target_ignore);
544 de_fault (to_terminal_ours_for_output,
545 (void (*) (void))
546 target_ignore);
547 de_fault (to_terminal_ours,
548 (void (*) (void))
549 target_ignore);
550 de_fault (to_terminal_save_ours,
551 (void (*) (void))
552 target_ignore);
553 de_fault (to_terminal_info,
554 default_terminal_info);
555 de_fault (to_kill,
556 (void (*) (void))
557 noprocess);
558 de_fault (to_load,
559 (void (*) (char *, int))
560 tcomplain);
561 de_fault (to_lookup_symbol,
562 (int (*) (char *, CORE_ADDR *))
563 nosymbol);
564 de_fault (to_post_startup_inferior,
565 (void (*) (ptid_t))
566 target_ignore);
567 de_fault (to_acknowledge_created_inferior,
568 (void (*) (int))
569 target_ignore);
570 de_fault (to_insert_fork_catchpoint,
571 (void (*) (int))
572 tcomplain);
573 de_fault (to_remove_fork_catchpoint,
574 (int (*) (int))
575 tcomplain);
576 de_fault (to_insert_vfork_catchpoint,
577 (void (*) (int))
578 tcomplain);
579 de_fault (to_remove_vfork_catchpoint,
580 (int (*) (int))
581 tcomplain);
582 de_fault (to_insert_exec_catchpoint,
583 (void (*) (int))
584 tcomplain);
585 de_fault (to_remove_exec_catchpoint,
586 (int (*) (int))
587 tcomplain);
588 de_fault (to_has_exited,
589 (int (*) (int, int, int *))
590 return_zero);
591 de_fault (to_mourn_inferior,
592 (void (*) (void))
593 noprocess);
594 de_fault (to_can_run,
595 return_zero);
596 de_fault (to_notice_signals,
597 (void (*) (ptid_t))
598 target_ignore);
599 de_fault (to_thread_alive,
600 (int (*) (ptid_t))
601 return_zero);
602 de_fault (to_find_new_threads,
603 (void (*) (void))
604 target_ignore);
605 de_fault (to_extra_thread_info,
606 (char *(*) (struct thread_info *))
607 return_zero);
608 de_fault (to_stop,
609 (void (*) (ptid_t))
610 target_ignore);
611 current_target.to_xfer_partial = current_xfer_partial;
612 de_fault (to_rcmd,
613 (void (*) (char *, struct ui_file *))
614 tcomplain);
615 de_fault (to_pid_to_exec_file,
616 (char *(*) (int))
617 return_zero);
618 de_fault (to_async,
619 (void (*) (void (*) (enum inferior_event_type, void*), void*))
620 tcomplain);
621 de_fault (to_async_mask,
622 (int (*) (int))
623 return_one);
624 current_target.to_read_description = NULL;
625 #undef de_fault
626
627 /* Finally, position the target-stack beneath the squashed
628 "current_target". That way code looking for a non-inherited
629 target method can quickly and simply find it. */
630 current_target.beneath = target_stack;
631
632 if (targetdebug)
633 setup_target_debug ();
634 }
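/* Illustrative sketch, assuming a hypothetical method "to_frobnicate":
   the explicit, local target-stack search recommended in the NOTE
   above, instead of relying on the squashed current_target.  Real
   examples of this pattern in this file are target_memory_map and
   target_flash_erase below.

     struct target_ops *t;

     for (t = current_target.beneath; t != NULL; t = t->beneath)
       if (t->to_frobnicate != NULL)
         return t->to_frobnicate (t);

     tcomplain ();
*/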
635
636 /* Mark OPS as a running target. This reverses the effect
637 of target_mark_exited. */
638
639 void
640 target_mark_running (struct target_ops *ops)
641 {
642 struct target_ops *t;
643
644 for (t = target_stack; t != NULL; t = t->beneath)
645 if (t == ops)
646 break;
647 if (t == NULL)
648 internal_error (__FILE__, __LINE__,
649 "Attempted to mark unpushed target \"%s\" as running",
650 ops->to_shortname);
651
652 ops->to_has_execution = 1;
653 ops->to_has_all_memory = 1;
654 ops->to_has_memory = 1;
655 ops->to_has_stack = 1;
656 ops->to_has_registers = 1;
657
658 update_current_target ();
659 }
660
661 /* Mark OPS as a non-running target. This reverses the effect
662 of target_mark_running. */
663
664 void
665 target_mark_exited (struct target_ops *ops)
666 {
667 struct target_ops *t;
668
669 for (t = target_stack; t != NULL; t = t->beneath)
670 if (t == ops)
671 break;
672 if (t == NULL)
673 internal_error (__FILE__, __LINE__,
674 "Attempted to mark unpushed target \"%s\" as running",
675 ops->to_shortname);
676
677 ops->to_has_execution = 0;
678 ops->to_has_all_memory = 0;
679 ops->to_has_memory = 0;
680 ops->to_has_stack = 0;
681 ops->to_has_registers = 0;
682
683 update_current_target ();
684 }
685
686 /* Push a new target type into the stack of the existing target accessors,
687 possibly superseding some of the existing accessors.
688
689 Result is zero if the pushed target ended up on top of the stack,
690 nonzero if at least one target is on top of it.
691
692 Rather than allow an empty stack, we always have the dummy target at
693 the bottom stratum, so we can call the function vectors without
694 checking them. */
695
696 int
697 push_target (struct target_ops *t)
698 {
699 struct target_ops **cur;
700
701 /* Check magic number. If wrong, it probably means someone changed
702 the struct definition, but not all the places that initialize one. */
703 if (t->to_magic != OPS_MAGIC)
704 {
705 fprintf_unfiltered (gdb_stderr,
706 "Magic number of %s target struct wrong\n",
707 t->to_shortname);
708 internal_error (__FILE__, __LINE__, _("failed internal consistency check"));
709 }
710
711 /* Find the proper stratum to install this target in. */
712 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
713 {
714 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
715 break;
716 }
717
718 /* If there are already targets at this stratum, remove them. */
719 /* FIXME: cagney/2003-10-15: I think this should be popping all
720 targets to CUR, and not just those at this stratum level. */
721 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
722 {
723 /* There's already something at this stratum level. Close it,
724 and un-hook it from the stack. */
725 struct target_ops *tmp = (*cur);
726 (*cur) = (*cur)->beneath;
727 tmp->beneath = NULL;
728 target_close (tmp, 0);
729 }
730
731 /* We have removed all targets in our stratum, now add the new one. */
732 t->beneath = (*cur);
733 (*cur) = t;
734
735 update_current_target ();
736
737 /* Not on top? */
738 return (t != target_stack);
739 }
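/* Illustrative usage sketch (assumption, using the hypothetical
   "demo_ops" vector from the earlier sketch): a target's open routine
   typically pushes its vector, and may warn if something remained
   stacked above it.

     if (push_target (&demo_ops) != 0)
       warning (_("demo target is not on top of the target stack"));
*/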
740
741 /* Remove a target_ops vector from the stack, wherever it may be.
742 Return how many times it was removed (0 or 1). */
743
744 int
745 unpush_target (struct target_ops *t)
746 {
747 struct target_ops **cur;
748 struct target_ops *tmp;
749
750 if (t->to_stratum == dummy_stratum)
751 internal_error (__FILE__, __LINE__,
752 "Attempt to unpush the dummy target");
753
754 /* Look for the specified target. Note that we assume that a target
755 can only occur once in the target stack. */
756
757 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
758 {
759 if ((*cur) == t)
760 break;
761 }
762
763 if ((*cur) == NULL)
764 return 0; /* Didn't find target_ops, quit now */
765
766 /* NOTE: cagney/2003-12-06: In '94 the close call was made
767 unconditional by moving it to before the above check that the
768 target was in the target stack (something about "Change the way
769 pushing and popping of targets work to support target overlays
770 and inheritance"). This doesn't make much sense - only open
771 targets should be closed. */
772 target_close (t, 0);
773
774 /* Unchain the target */
775 tmp = (*cur);
776 (*cur) = (*cur)->beneath;
777 tmp->beneath = NULL;
778
779 update_current_target ();
780
781 return 1;
782 }
783
784 void
785 pop_target (void)
786 {
787 target_close (target_stack, 0); /* Let it clean up */
788 if (unpush_target (target_stack) == 1)
789 return;
790
791 fprintf_unfiltered (gdb_stderr,
792 "pop_target couldn't find target %s\n",
793 current_target.to_shortname);
794 internal_error (__FILE__, __LINE__, _("failed internal consistency check"));
795 }
796
797 void
798 pop_all_targets_above (enum strata above_stratum, int quitting)
799 {
800 while ((int) (current_target.to_stratum) > (int) above_stratum)
801 {
802 target_close (target_stack, quitting);
803 if (!unpush_target (target_stack))
804 {
805 fprintf_unfiltered (gdb_stderr,
806 "pop_all_targets couldn't find target %s\n",
807 target_stack->to_shortname);
808 internal_error (__FILE__, __LINE__,
809 _("failed internal consistency check"));
810 break;
811 }
812 }
813 }
814
815 void
816 pop_all_targets (int quitting)
817 {
818 pop_all_targets_above (dummy_stratum, quitting);
819 }
820
821 /* Using the objfile specified in OBJFILE, find the address for the
822 current thread's thread-local storage with offset OFFSET. */
823 CORE_ADDR
824 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
825 {
826 volatile CORE_ADDR addr = 0;
827
828 if (target_get_thread_local_address_p ()
829 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch))
830 {
831 ptid_t ptid = inferior_ptid;
832 volatile struct gdb_exception ex;
833
834 TRY_CATCH (ex, RETURN_MASK_ALL)
835 {
836 CORE_ADDR lm_addr;
837
838 /* Fetch the load module address for this objfile. */
839 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch,
840 objfile);
841 /* If it's 0, throw the appropriate exception. */
842 if (lm_addr == 0)
843 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
844 _("TLS load module not found"));
845
846 addr = target_get_thread_local_address (ptid, lm_addr, offset);
847 }
848 /* If an error occurred, print TLS related messages here. Otherwise,
849 throw the error to some higher catcher. */
850 if (ex.reason < 0)
851 {
852 int objfile_is_library = (objfile->flags & OBJF_SHARED);
853
854 switch (ex.error)
855 {
856 case TLS_NO_LIBRARY_SUPPORT_ERROR:
857 error (_("Cannot find thread-local variables in this thread library."));
858 break;
859 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
860 if (objfile_is_library)
861 error (_("Cannot find shared library `%s' in dynamic"
862 " linker's load module list"), objfile->name);
863 else
864 error (_("Cannot find executable file `%s' in dynamic"
865 " linker's load module list"), objfile->name);
866 break;
867 case TLS_NOT_ALLOCATED_YET_ERROR:
868 if (objfile_is_library)
869 error (_("The inferior has not yet allocated storage for"
870 " thread-local variables in\n"
871 "the shared library `%s'\n"
872 "for %s"),
873 objfile->name, target_pid_to_str (ptid));
874 else
875 error (_("The inferior has not yet allocated storage for"
876 " thread-local variables in\n"
877 "the executable `%s'\n"
878 "for %s"),
879 objfile->name, target_pid_to_str (ptid));
880 break;
881 case TLS_GENERIC_ERROR:
882 if (objfile_is_library)
883 error (_("Cannot find thread-local storage for %s, "
884 "shared library %s:\n%s"),
885 target_pid_to_str (ptid),
886 objfile->name, ex.message);
887 else
888 error (_("Cannot find thread-local storage for %s, "
889 "executable file %s:\n%s"),
890 target_pid_to_str (ptid),
891 objfile->name, ex.message);
892 break;
893 default:
894 throw_exception (ex);
895 break;
896 }
897 }
898 }
899 /* It wouldn't be wrong here to try a gdbarch method, too; finding
900 TLS is an ABI-specific thing. But we don't do that yet. */
901 else
902 error (_("Cannot find thread-local variables on this target"));
903
904 return addr;
905 }
906
907 #undef MIN
908 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
909
910 /* target_read_string -- read a null terminated string, up to LEN bytes,
911 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
912 Set *STRING to a pointer to malloc'd memory containing the data; the caller
913 is responsible for freeing it. Return the number of bytes successfully
914 read. */
915
916 int
917 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
918 {
919 int tlen, origlen, offset, i;
920 gdb_byte buf[4];
921 int errcode = 0;
922 char *buffer;
923 int buffer_allocated;
924 char *bufptr;
925 unsigned int nbytes_read = 0;
926
927 gdb_assert (string);
928
929 /* Small for testing. */
930 buffer_allocated = 4;
931 buffer = xmalloc (buffer_allocated);
932 bufptr = buffer;
933
934 origlen = len;
935
936 while (len > 0)
937 {
938 tlen = MIN (len, 4 - (memaddr & 3));
939 offset = memaddr & 3;
940
941 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
942 if (errcode != 0)
943 {
944 /* The transfer request might have crossed the boundary to an
945 unallocated region of memory. Retry the transfer, requesting
946 a single byte. */
947 tlen = 1;
948 offset = 0;
949 errcode = target_read_memory (memaddr, buf, 1);
950 if (errcode != 0)
951 goto done;
952 }
953
954 if (bufptr - buffer + tlen > buffer_allocated)
955 {
956 unsigned int bytes;
957 bytes = bufptr - buffer;
958 buffer_allocated *= 2;
959 buffer = xrealloc (buffer, buffer_allocated);
960 bufptr = buffer + bytes;
961 }
962
963 for (i = 0; i < tlen; i++)
964 {
965 *bufptr++ = buf[i + offset];
966 if (buf[i + offset] == '\000')
967 {
968 nbytes_read += i + 1;
969 goto done;
970 }
971 }
972
973 memaddr += tlen;
974 len -= tlen;
975 nbytes_read += tlen;
976 }
977 done:
978 *string = buffer;
979 if (errnop != NULL)
980 *errnop = errcode;
981 return nbytes_read;
982 }
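/* Illustrative usage sketch (assumption): reading a NUL-terminated
   string of at most 200 bytes from the inferior at ADDR.  The buffer
   is always allocated, even on error, so it must be freed.

     char *str;
     int err;
     int nbytes = target_read_string (addr, &str, 200, &err);

     if (err == 0)
       printf_filtered ("%s\n", str);
     xfree (str);
*/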
983
984 /* Find a section containing ADDR. */
985 struct section_table *
986 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
987 {
988 struct section_table *secp;
989 for (secp = target->to_sections;
990 secp < target->to_sections_end;
991 secp++)
992 {
993 if (addr >= secp->addr && addr < secp->endaddr)
994 return secp;
995 }
996 return NULL;
997 }
998
999 /* Perform a partial memory transfer. The arguments and return
1000 value are just as for target_xfer_partial. */
1001
1002 static LONGEST
1003 memory_xfer_partial (struct target_ops *ops, void *readbuf, const void *writebuf,
1004 ULONGEST memaddr, LONGEST len)
1005 {
1006 LONGEST res;
1007 int reg_len;
1008 struct mem_region *region;
1009
1010 /* Zero length requests are ok and require no work. */
1011 if (len == 0)
1012 return 0;
1013
1014 /* Try the executable file, if "trust-readonly-sections" is set. */
1015 if (readbuf != NULL && trust_readonly)
1016 {
1017 struct section_table *secp;
1018
1019 secp = target_section_by_addr (ops, memaddr);
1020 if (secp != NULL
1021 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1022 & SEC_READONLY))
1023 return xfer_memory (memaddr, readbuf, len, 0, NULL, ops);
1024 }
1025
1026 /* Likewise for accesses to unmapped overlay sections. */
1027 if (readbuf != NULL && overlay_debugging)
1028 {
1029 struct obj_section *section = find_pc_overlay (memaddr);
1030 if (pc_in_unmapped_range (memaddr, section))
1031 return xfer_memory (memaddr, readbuf, len, 0, NULL, ops);
1032 }
1033
1034 /* Try GDB's internal data cache. */
1035 region = lookup_mem_region (memaddr);
1036 /* region->hi == 0 means there's no upper bound. */
1037 if (memaddr + len < region->hi || region->hi == 0)
1038 reg_len = len;
1039 else
1040 reg_len = region->hi - memaddr;
1041
1042 switch (region->attrib.mode)
1043 {
1044 case MEM_RO:
1045 if (writebuf != NULL)
1046 return -1;
1047 break;
1048
1049 case MEM_WO:
1050 if (readbuf != NULL)
1051 return -1;
1052 break;
1053
1054 case MEM_FLASH:
1055 /* We only support writing to flash during "load" for now. */
1056 if (writebuf != NULL)
1057 error (_("Writing to flash memory forbidden in this context"));
1058 break;
1059
1060 case MEM_NONE:
1061 return -1;
1062 }
1063
1064 if (region->attrib.cache)
1065 {
1066 /* FIXME drow/2006-08-09: This call discards OPS, so the raw
1067 memory request will start back at current_target. */
1068 if (readbuf != NULL)
1069 res = dcache_xfer_memory (target_dcache, memaddr, readbuf,
1070 reg_len, 0);
1071 else
1072 /* FIXME drow/2006-08-09: If we're going to preserve const
1073 correctness dcache_xfer_memory should take readbuf and
1074 writebuf. */
1075 res = dcache_xfer_memory (target_dcache, memaddr,
1076 (void *) writebuf,
1077 reg_len, 1);
1078 if (res <= 0)
1079 return -1;
1080 else
1081 {
1082 if (readbuf && !show_memory_breakpoints)
1083 breakpoint_restore_shadows (readbuf, memaddr, reg_len);
1084 return res;
1085 }
1086 }
1087
1088 /* If none of those methods found the memory we wanted, fall back
1089 to a target partial transfer. Normally a single call to
1090 to_xfer_partial is enough; if it doesn't recognize an object
1091 it will call the to_xfer_partial of the next target down.
1092 But for memory this won't do. Memory is the only target
1093 object which can be read from more than one valid target.
1094 A core file, for instance, could have some of memory but
1095 delegate other bits to the target below it. So, we must
1096 manually try all targets. */
1097
1098 do
1099 {
1100 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1101 readbuf, writebuf, memaddr, reg_len);
1102 if (res > 0)
1103 break;
1104
1105 /* We want to continue past core files to executables, but not
1106 past a running target's memory. */
1107 if (ops->to_has_all_memory)
1108 break;
1109
1110 ops = ops->beneath;
1111 }
1112 while (ops != NULL);
1113
1114 if (readbuf && !show_memory_breakpoints)
1115 breakpoint_restore_shadows (readbuf, memaddr, reg_len);
1116
1117 /* If we still haven't got anything, return the last error. We
1118 give up. */
1119 return res;
1120 }
1121
1122 static void
1123 restore_show_memory_breakpoints (void *arg)
1124 {
1125 show_memory_breakpoints = (uintptr_t) arg;
1126 }
1127
1128 struct cleanup *
1129 make_show_memory_breakpoints_cleanup (int show)
1130 {
1131 int current = show_memory_breakpoints;
1132 show_memory_breakpoints = show;
1133
1134 return make_cleanup (restore_show_memory_breakpoints,
1135 (void *) (uintptr_t) current);
1136 }
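/* Illustrative usage sketch (assumption): temporarily request the raw
   memory contents, including any breakpoint instructions GDB has
   inserted, for the duration of a block of reads.

     struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);

     ... memory reads here see the real contents ...

     do_cleanups (old_chain);
*/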
1137
1138 static LONGEST
1139 target_xfer_partial (struct target_ops *ops,
1140 enum target_object object, const char *annex,
1141 void *readbuf, const void *writebuf,
1142 ULONGEST offset, LONGEST len)
1143 {
1144 LONGEST retval;
1145
1146 gdb_assert (ops->to_xfer_partial != NULL);
1147
1148 /* If this is a memory transfer, let the memory-specific code
1149 have a look at it instead. Memory transfers are more
1150 complicated. */
1151 if (object == TARGET_OBJECT_MEMORY)
1152 retval = memory_xfer_partial (ops, readbuf, writebuf, offset, len);
1153 else
1154 {
1155 enum target_object raw_object = object;
1156
1157 /* If this is a raw memory transfer, request the normal
1158 memory object from other layers. */
1159 if (raw_object == TARGET_OBJECT_RAW_MEMORY)
1160 raw_object = TARGET_OBJECT_MEMORY;
1161
1162 retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
1163 writebuf, offset, len);
1164 }
1165
1166 if (targetdebug)
1167 {
1168 const unsigned char *myaddr = NULL;
1169
1170 fprintf_unfiltered (gdb_stdlog,
1171 "%s:target_xfer_partial (%d, %s, 0x%lx, 0x%lx, %s, %s) = %s",
1172 ops->to_shortname,
1173 (int) object,
1174 (annex ? annex : "(null)"),
1175 (long) readbuf, (long) writebuf,
1176 core_addr_to_string_nz (offset),
1177 plongest (len), plongest (retval));
1178
1179 if (readbuf)
1180 myaddr = readbuf;
1181 if (writebuf)
1182 myaddr = writebuf;
1183 if (retval > 0 && myaddr != NULL)
1184 {
1185 int i;
1186
1187 fputs_unfiltered (", bytes =", gdb_stdlog);
1188 for (i = 0; i < retval; i++)
1189 {
1190 if ((((long) &(myaddr[i])) & 0xf) == 0)
1191 {
1192 if (targetdebug < 2 && i > 0)
1193 {
1194 fprintf_unfiltered (gdb_stdlog, " ...");
1195 break;
1196 }
1197 fprintf_unfiltered (gdb_stdlog, "\n");
1198 }
1199
1200 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1201 }
1202 }
1203
1204 fputc_unfiltered ('\n', gdb_stdlog);
1205 }
1206 return retval;
1207 }
1208
1209 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1210 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1211 if any error occurs.
1212
1213 If an error occurs, no guarantee is made about the contents of the data at
1214 MYADDR. In particular, the caller should not depend upon partial reads
1215 filling the buffer with good data. There is no way for the caller to know
1216 how much good data might have been transferred anyway. Callers that can
1217 deal with partial reads should call target_read (which will retry until
1218 it makes no progress, and then return how much was transferred). */
1219
1220 int
1221 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1222 {
1223 if (target_read (&current_target, TARGET_OBJECT_MEMORY, NULL,
1224 myaddr, memaddr, len) == len)
1225 return 0;
1226 else
1227 return EIO;
1228 }
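/* Illustrative usage sketch (assumption): reading a 4-byte word and
   reporting failure in the usual way.

     gdb_byte buf[4];

     if (target_read_memory (addr, buf, sizeof buf) != 0)
       error (_("Cannot read memory at %s"), hex_string (addr));
*/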
1229
1230 int
1231 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1232 {
1233 if (target_write (&current_target, TARGET_OBJECT_MEMORY, NULL,
1234 myaddr, memaddr, len) == len)
1235 return 0;
1236 else
1237 return EIO;
1238 }
1239
1240 /* Fetch the target's memory map. */
1241
1242 VEC(mem_region_s) *
1243 target_memory_map (void)
1244 {
1245 VEC(mem_region_s) *result;
1246 struct mem_region *last_one, *this_one;
1247 int ix;
1248 struct target_ops *t;
1249
1250 if (targetdebug)
1251 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1252
1253 for (t = current_target.beneath; t != NULL; t = t->beneath)
1254 if (t->to_memory_map != NULL)
1255 break;
1256
1257 if (t == NULL)
1258 return NULL;
1259
1260 result = t->to_memory_map (t);
1261 if (result == NULL)
1262 return NULL;
1263
1264 qsort (VEC_address (mem_region_s, result),
1265 VEC_length (mem_region_s, result),
1266 sizeof (struct mem_region), mem_region_cmp);
1267
1268 /* Check that regions do not overlap. Simultaneously assign
1269 a numbering for the "mem" commands to use to refer to
1270 each region. */
1271 last_one = NULL;
1272 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1273 {
1274 this_one->number = ix;
1275
1276 if (last_one && last_one->hi > this_one->lo)
1277 {
1278 warning (_("Overlapping regions in memory map: ignoring"));
1279 VEC_free (mem_region_s, result);
1280 return NULL;
1281 }
1282 last_one = this_one;
1283 }
1284
1285 return result;
1286 }
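/* Illustrative usage sketch (assumption): walking the fetched map.

     VEC(mem_region_s) *map = target_memory_map ();
     struct mem_region *r;
     int ix;

     for (ix = 0; VEC_iterate (mem_region_s, map, ix, r); ix++)
       printf_filtered ("%s..%s\n", paddr (r->lo), paddr (r->hi));

     VEC_free (mem_region_s, map);
*/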
1287
1288 void
1289 target_flash_erase (ULONGEST address, LONGEST length)
1290 {
1291 struct target_ops *t;
1292
1293 for (t = current_target.beneath; t != NULL; t = t->beneath)
1294 if (t->to_flash_erase != NULL)
1295 {
1296 if (targetdebug)
1297 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1298 paddr (address), phex (length, 0));
1299 t->to_flash_erase (t, address, length);
1300 return;
1301 }
1302
1303 tcomplain ();
1304 }
1305
1306 void
1307 target_flash_done (void)
1308 {
1309 struct target_ops *t;
1310
1311 for (t = current_target.beneath; t != NULL; t = t->beneath)
1312 if (t->to_flash_done != NULL)
1313 {
1314 if (targetdebug)
1315 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1316 t->to_flash_done (t);
1317 return;
1318 }
1319
1320 tcomplain ();
1321 }
1322
1323 #ifndef target_stopped_data_address_p
1324 int
1325 target_stopped_data_address_p (struct target_ops *target)
1326 {
1327 if (target->to_stopped_data_address
1328 == (int (*) (struct target_ops *, CORE_ADDR *)) return_zero)
1329 return 0;
1330 if (target->to_stopped_data_address == debug_to_stopped_data_address
1331 && (debug_target.to_stopped_data_address
1332 == (int (*) (struct target_ops *, CORE_ADDR *)) return_zero))
1333 return 0;
1334 return 1;
1335 }
1336 #endif
1337
1338 static void
1339 show_trust_readonly (struct ui_file *file, int from_tty,
1340 struct cmd_list_element *c, const char *value)
1341 {
1342 fprintf_filtered (file, _("\
1343 Mode for reading from readonly sections is %s.\n"),
1344 value);
1345 }
1346
1347 /* More generic transfers. */
1348
1349 static LONGEST
1350 default_xfer_partial (struct target_ops *ops, enum target_object object,
1351 const char *annex, gdb_byte *readbuf,
1352 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1353 {
1354 if (object == TARGET_OBJECT_MEMORY
1355 && ops->deprecated_xfer_memory != NULL)
1356 /* If available, fall back to the target's
1357 "deprecated_xfer_memory" method. */
1358 {
1359 int xfered = -1;
1360 errno = 0;
1361 if (writebuf != NULL)
1362 {
1363 void *buffer = xmalloc (len);
1364 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1365 memcpy (buffer, writebuf, len);
1366 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1367 1/*write*/, NULL, ops);
1368 do_cleanups (cleanup);
1369 }
1370 if (readbuf != NULL)
1371 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1372 0/*read*/, NULL, ops);
1373 if (xfered > 0)
1374 return xfered;
1375 else if (xfered == 0 && errno == 0)
1376 /* "deprecated_xfer_memory" uses 0, cross checked against
1377 ERRNO as one indication of an error. */
1378 return 0;
1379 else
1380 return -1;
1381 }
1382 else if (ops->beneath != NULL)
1383 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1384 readbuf, writebuf, offset, len);
1385 else
1386 return -1;
1387 }
1388
1389 /* The xfer_partial handler for the topmost target. Unlike the default,
1390 it does not need to handle memory specially; it just passes all
1391 requests down the stack. */
1392
1393 static LONGEST
1394 current_xfer_partial (struct target_ops *ops, enum target_object object,
1395 const char *annex, gdb_byte *readbuf,
1396 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1397 {
1398 if (ops->beneath != NULL)
1399 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1400 readbuf, writebuf, offset, len);
1401 else
1402 return -1;
1403 }
1404
1405 /* Target vector read/write partial wrapper functions.
1406
1407 NOTE: cagney/2003-10-21: I wonder if having "to_xfer_partial
1408 (inbuf, outbuf)", instead of separate read/write methods, would make life
1409 easier. */
1410
1411 static LONGEST
1412 target_read_partial (struct target_ops *ops,
1413 enum target_object object,
1414 const char *annex, gdb_byte *buf,
1415 ULONGEST offset, LONGEST len)
1416 {
1417 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
1418 }
1419
1420 static LONGEST
1421 target_write_partial (struct target_ops *ops,
1422 enum target_object object,
1423 const char *annex, const gdb_byte *buf,
1424 ULONGEST offset, LONGEST len)
1425 {
1426 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
1427 }
1428
1429 /* Wrappers to perform the full transfer. */
1430 LONGEST
1431 target_read (struct target_ops *ops,
1432 enum target_object object,
1433 const char *annex, gdb_byte *buf,
1434 ULONGEST offset, LONGEST len)
1435 {
1436 LONGEST xfered = 0;
1437 while (xfered < len)
1438 {
1439 LONGEST xfer = target_read_partial (ops, object, annex,
1440 (gdb_byte *) buf + xfered,
1441 offset + xfered, len - xfered);
1442 /* Call an observer, notifying them of the xfer progress? */
1443 if (xfer == 0)
1444 return xfered;
1445 if (xfer < 0)
1446 return -1;
1447 xfered += xfer;
1448 QUIT;
1449 }
1450 return len;
1451 }
1452
1453 LONGEST
1454 target_read_until_error (struct target_ops *ops,
1455 enum target_object object,
1456 const char *annex, gdb_byte *buf,
1457 ULONGEST offset, LONGEST len)
1458 {
1459 LONGEST xfered = 0;
1460 while (xfered < len)
1461 {
1462 LONGEST xfer = target_read_partial (ops, object, annex,
1463 (gdb_byte *) buf + xfered,
1464 offset + xfered, len - xfered);
1465 /* Call an observer, notifying them of the xfer progress? */
1466 if (xfer == 0)
1467 return xfered;
1468 if (xfer < 0)
1469 {
1470 /* We've got an error. Try to read in smaller blocks. */
1471 ULONGEST start = offset + xfered;
1472 ULONGEST remaining = len - xfered;
1473 ULONGEST half;
1474
1475 /* If an attempt was made to read a random memory address,
1476 it's likely that the very first byte is not accessible.
1477 Try reading the first byte, to avoid doing log N tries
1478 below. */
1479 xfer = target_read_partial (ops, object, annex,
1480 (gdb_byte *) buf + xfered, start, 1);
1481 if (xfer <= 0)
1482 return xfered;
1483 start += 1;
1484 remaining -= 1;
1485 half = remaining/2;
1486
1487 while (half > 0)
1488 {
1489 xfer = target_read_partial (ops, object, annex,
1490 (gdb_byte *) buf + xfered,
1491 start, half);
1492 if (xfer == 0)
1493 return xfered;
1494 if (xfer < 0)
1495 {
1496 remaining = half;
1497 }
1498 else
1499 {
1500 /* We have successfully read the first half. So, the
1501 error must be in the second half. Adjust start and
1502 remaining to point at the second half. */
1503 xfered += xfer;
1504 start += xfer;
1505 remaining -= xfer;
1506 }
1507 half = remaining/2;
1508 }
1509
1510 return xfered;
1511 }
1512 xfered += xfer;
1513 QUIT;
1514 }
1515 return len;
1516 }
1517
1518
1519 /* An alternative to target_write with progress callbacks. */
1520
1521 LONGEST
1522 target_write_with_progress (struct target_ops *ops,
1523 enum target_object object,
1524 const char *annex, const gdb_byte *buf,
1525 ULONGEST offset, LONGEST len,
1526 void (*progress) (ULONGEST, void *), void *baton)
1527 {
1528 LONGEST xfered = 0;
1529
1530 /* Give the progress callback a chance to set up. */
1531 if (progress)
1532 (*progress) (0, baton);
1533
1534 while (xfered < len)
1535 {
1536 LONGEST xfer = target_write_partial (ops, object, annex,
1537 (gdb_byte *) buf + xfered,
1538 offset + xfered, len - xfered);
1539
1540 if (xfer == 0)
1541 return xfered;
1542 if (xfer < 0)
1543 return -1;
1544
1545 if (progress)
1546 (*progress) (xfer, baton);
1547
1548 xfered += xfer;
1549 QUIT;
1550 }
1551 return len;
1552 }
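/* Illustrative usage sketch (assumption): writing memory with a
   progress callback, for example so that a long download can report
   how far it has got.

     static void
     note_progress (ULONGEST bytes, void *baton)
     {
       printf_filtered ("wrote another %s bytes\n", phex (bytes, 0));
     }

     ...
     target_write_with_progress (&current_target, TARGET_OBJECT_MEMORY,
                                 NULL, buf, addr, len,
                                 note_progress, NULL);
*/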
1553
1554 LONGEST
1555 target_write (struct target_ops *ops,
1556 enum target_object object,
1557 const char *annex, const gdb_byte *buf,
1558 ULONGEST offset, LONGEST len)
1559 {
1560 return target_write_with_progress (ops, object, annex, buf, offset, len,
1561 NULL, NULL);
1562 }
1563
1564 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
1565 the size of the transferred data. PADDING additional bytes are
1566 available in *BUF_P. This is a helper function for
1567 target_read_alloc; see the declaration of that function for more
1568 information. */
1569
1570 static LONGEST
1571 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
1572 const char *annex, gdb_byte **buf_p, int padding)
1573 {
1574 size_t buf_alloc, buf_pos;
1575 gdb_byte *buf;
1576 LONGEST n;
1577
1578 /* This function does not have a length parameter; it reads the
1579 entire OBJECT. Also, it doesn't support objects fetched partly
1580 from one target and partly from another (in a different stratum,
1581 e.g. a core file and an executable). Both reasons make it
1582 unsuitable for reading memory. */
1583 gdb_assert (object != TARGET_OBJECT_MEMORY);
1584
1585 /* Start by reading up to 4K at a time. The target will throttle
1586 this number down if necessary. */
1587 buf_alloc = 4096;
1588 buf = xmalloc (buf_alloc);
1589 buf_pos = 0;
1590 while (1)
1591 {
1592 n = target_read_partial (ops, object, annex, &buf[buf_pos],
1593 buf_pos, buf_alloc - buf_pos - padding);
1594 if (n < 0)
1595 {
1596 /* An error occurred. */
1597 xfree (buf);
1598 return -1;
1599 }
1600 else if (n == 0)
1601 {
1602 /* Read all there was. */
1603 if (buf_pos == 0)
1604 xfree (buf);
1605 else
1606 *buf_p = buf;
1607 return buf_pos;
1608 }
1609
1610 buf_pos += n;
1611
1612 /* If the buffer is filling up, expand it. */
1613 if (buf_alloc < buf_pos * 2)
1614 {
1615 buf_alloc *= 2;
1616 buf = xrealloc (buf, buf_alloc);
1617 }
1618
1619 QUIT;
1620 }
1621 }
1622
1623 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
1624 the size of the transferred data. See the declaration in "target.h"
1625 for more information about the return value. */
1626
1627 LONGEST
1628 target_read_alloc (struct target_ops *ops, enum target_object object,
1629 const char *annex, gdb_byte **buf_p)
1630 {
1631 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
1632 }
1633
1634 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
1635 returned as a string, allocated using xmalloc. If an error occurs
1636 or the transfer is unsupported, NULL is returned. Empty objects
1637 are returned as allocated but empty strings. A warning is issued
1638 if the result contains any embedded NUL bytes. */
1639
1640 char *
1641 target_read_stralloc (struct target_ops *ops, enum target_object object,
1642 const char *annex)
1643 {
1644 gdb_byte *buffer;
1645 LONGEST transferred;
1646
1647 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
1648
1649 if (transferred < 0)
1650 return NULL;
1651
1652 if (transferred == 0)
1653 return xstrdup ("");
1654
1655 buffer[transferred] = 0;
1656 if (strlen (buffer) < transferred)
1657 warning (_("target object %d, annex %s, "
1658 "contained unexpected null characters"),
1659 (int) object, annex ? annex : "(none)");
1660
1661 return (char *) buffer;
1662 }
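/* Illustrative usage sketch (assumption): this is roughly how an XML
   target description document is fetched from a remote target; the
   annex name here is an assumption for illustration.

     char *xml = target_read_stralloc (&current_target,
                                       TARGET_OBJECT_AVAILABLE_FEATURES,
                                       "target.xml");
     if (xml != NULL)
       {
         ... parse the document ...
         xfree (xml);
       }
*/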
1663
1664 /* Memory transfer methods. */
1665
1666 void
1667 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
1668 LONGEST len)
1669 {
1670 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL, buf, addr, len)
1671 != len)
1672 memory_error (EIO, addr);
1673 }
1674
1675 ULONGEST
1676 get_target_memory_unsigned (struct target_ops *ops,
1677 CORE_ADDR addr, int len)
1678 {
1679 gdb_byte buf[sizeof (ULONGEST)];
1680
1681 gdb_assert (len <= sizeof (buf));
1682 get_target_memory (ops, addr, buf, len);
1683 return extract_unsigned_integer (buf, len);
1684 }
1685
1686 static void
1687 target_info (char *args, int from_tty)
1688 {
1689 struct target_ops *t;
1690 int has_all_mem = 0;
1691
1692 if (symfile_objfile != NULL)
1693 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
1694
1695 for (t = target_stack; t != NULL; t = t->beneath)
1696 {
1697 if (!t->to_has_memory)
1698 continue;
1699
1700 if ((int) (t->to_stratum) <= (int) dummy_stratum)
1701 continue;
1702 if (has_all_mem)
1703 printf_unfiltered (_("\tWhile running this, GDB does not access memory from...\n"));
1704 printf_unfiltered ("%s:\n", t->to_longname);
1705 (t->to_files_info) (t);
1706 has_all_mem = t->to_has_all_memory;
1707 }
1708 }
1709
1710 /* This function is called before any new inferior is created, e.g.
1711 by running a program, attaching, or connecting to a target.
1712 It cleans up any state from previous invocations which might
1713 change between runs. This is a subset of what target_preopen
1714 resets (things which might change between targets). */
1715
1716 void
1717 target_pre_inferior (int from_tty)
1718 {
1719 /* Clear out solib state. Otherwise the solib state of the previous
1720 inferior might have survived and is entirely wrong for the new
1721 target. This has been observed on GNU/Linux using glibc 2.3. How
1722 to reproduce:
1723
1724 bash$ ./foo&
1725 [1] 4711
1726 bash$ ./foo&
1727 [1] 4712
1728 bash$ gdb ./foo
1729 [...]
1730 (gdb) attach 4711
1731 (gdb) detach
1732 (gdb) attach 4712
1733 Cannot access memory at address 0xdeadbeef
1734 */
1735 no_shared_libraries (NULL, from_tty);
1736
1737 invalidate_target_mem_regions ();
1738
1739 target_clear_description ();
1740 }
1741
1742 /* This is to be called by the open routine before it does
1743 anything. */
1744
1745 void
1746 target_preopen (int from_tty)
1747 {
1748 dont_repeat ();
1749
1750 if (target_has_execution)
1751 {
1752 if (!from_tty
1753 || query (_("A program is being debugged already. Kill it? ")))
1754 target_kill ();
1755 else
1756 error (_("Program not killed."));
1757 }
1758
1759 /* Calling target_kill may remove the target from the stack. But if
1760 it doesn't (which seems like a win for UDI), remove it now. */
1761 /* Leave the exec target, though. The user may be switching from a
1762 live process to a core of the same program. */
1763 pop_all_targets_above (file_stratum, 0);
1764
1765 target_pre_inferior (from_tty);
1766 }
1767
1768 /* Detach a target after doing deferred register stores. */
1769
1770 void
1771 target_detach (char *args, int from_tty)
1772 {
1773 /* If we're in breakpoints-always-inserted mode, have to
1774 remove them before detaching. */
1775 remove_breakpoints ();
1776
1777 (current_target.to_detach) (args, from_tty);
1778 }
1779
1780 void
1781 target_disconnect (char *args, int from_tty)
1782 {
1783 struct target_ops *t;
1784
1785 /* If we're in breakpoints-always-inserted mode, have to
1786 remove them before disconnecting. */
1787 remove_breakpoints ();
1788
1789 for (t = current_target.beneath; t != NULL; t = t->beneath)
1790 if (t->to_disconnect != NULL)
1791 {
1792 if (targetdebug)
1793 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
1794 args, from_tty);
1795 t->to_disconnect (t, args, from_tty);
1796 return;
1797 }
1798
1799 tcomplain ();
1800 }
1801
1802 void
1803 target_resume (ptid_t ptid, int step, enum target_signal signal)
1804 {
1805 dcache_invalidate (target_dcache);
1806 (*current_target.to_resume) (ptid, step, signal);
1807 set_executing (ptid, 1);
1808 set_running (ptid, 1);
1809 }
1810 /* Look through the list of possible targets for a target that can
1811 follow forks. */
1812
1813 int
1814 target_follow_fork (int follow_child)
1815 {
1816 struct target_ops *t;
1817
1818 for (t = current_target.beneath; t != NULL; t = t->beneath)
1819 {
1820 if (t->to_follow_fork != NULL)
1821 {
1822 int retval = t->to_follow_fork (t, follow_child);
1823 if (targetdebug)
1824 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
1825 follow_child, retval);
1826 return retval;
1827 }
1828 }
1829
1830 /* Some target returned a fork event, but did not know how to follow it. */
1831 internal_error (__FILE__, __LINE__,
1832 "could not find a target to follow fork");
1833 }
1834
1835 /* Look for a target which can describe architectural features, starting
1836 from TARGET. If we find one, return its description. */
1837
1838 const struct target_desc *
1839 target_read_description (struct target_ops *target)
1840 {
1841 struct target_ops *t;
1842
1843 for (t = target; t != NULL; t = t->beneath)
1844 if (t->to_read_description != NULL)
1845 {
1846 const struct target_desc *tdesc;
1847
1848 tdesc = t->to_read_description (t);
1849 if (tdesc)
1850 return tdesc;
1851 }
1852
1853 return NULL;
1854 }
1855
1856 /* The default implementation of to_search_memory.
1857 This implements a basic search of memory, reading target memory and
1858 performing the search here (as opposed to performing the search on the
1859 target side with, for example, gdbserver). */
1860
1861 int
1862 simple_search_memory (struct target_ops *ops,
1863 CORE_ADDR start_addr, ULONGEST search_space_len,
1864 const gdb_byte *pattern, ULONGEST pattern_len,
1865 CORE_ADDR *found_addrp)
1866 {
1867 /* NOTE: also defined in find.c testcase. */
1868 #define SEARCH_CHUNK_SIZE 16000
1869 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
1870 /* Buffer to hold memory contents for searching. */
1871 gdb_byte *search_buf;
1872 unsigned search_buf_size;
1873 struct cleanup *old_cleanups;
1874
1875 search_buf_size = chunk_size + pattern_len - 1;
1876
1877 /* No point in trying to allocate a buffer larger than the search space. */
1878 if (search_space_len < search_buf_size)
1879 search_buf_size = search_space_len;
1880
1881 search_buf = malloc (search_buf_size);
1882 if (search_buf == NULL)
1883 error (_("Unable to allocate memory to perform the search."));
1884 old_cleanups = make_cleanup (free_current_contents, &search_buf);
1885
1886 /* Prime the search buffer. */
1887
1888 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
1889 search_buf, start_addr, search_buf_size) != search_buf_size)
1890 {
1891 warning (_("Unable to access target memory at %s, halting search."),
1892 hex_string (start_addr));
1893 do_cleanups (old_cleanups);
1894 return -1;
1895 }
1896
1897 /* Perform the search.
1898
1899 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
1900 When we've scanned N bytes we copy the trailing bytes to the start and
1901 read in another N bytes. */
1902
1903 while (search_space_len >= pattern_len)
1904 {
1905 gdb_byte *found_ptr;
1906 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
1907
1908 found_ptr = memmem (search_buf, nr_search_bytes,
1909 pattern, pattern_len);
1910
1911 if (found_ptr != NULL)
1912 {
1913 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
1914 *found_addrp = found_addr;
1915 do_cleanups (old_cleanups);
1916 return 1;
1917 }
1918
1919 /* Not found in this chunk, skip to next chunk. */
1920
1921 /* Don't let search_space_len wrap here, it's unsigned. */
1922 if (search_space_len >= chunk_size)
1923 search_space_len -= chunk_size;
1924 else
1925 search_space_len = 0;
1926
1927 if (search_space_len >= pattern_len)
1928 {
1929 unsigned keep_len = search_buf_size - chunk_size;
1930 CORE_ADDR read_addr = start_addr + keep_len;
1931 int nr_to_read;
1932
1933 /* Copy the trailing part of the previous iteration to the front
1934 of the buffer for the next iteration. */
1935 gdb_assert (keep_len == pattern_len - 1);
1936 memcpy (search_buf, search_buf + chunk_size, keep_len);
1937
1938 nr_to_read = min (search_space_len - keep_len, chunk_size);
1939
1940 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
1941 search_buf + keep_len, read_addr,
1942 nr_to_read) != nr_to_read)
1943 {
1944 warning (_("Unable to access target memory at %s, halting search."),
1945 hex_string (read_addr));
1946 do_cleanups (old_cleanups);
1947 return -1;
1948 }
1949
1950 start_addr += chunk_size;
1951 }
1952 }
1953
1954 /* Not found. */
1955
1956 do_cleanups (old_cleanups);
1957 return 0;
1958 }
1959
1960 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
1961 sequence of bytes in PATTERN with length PATTERN_LEN.
1962
1963 The result is 1 if found, 0 if not found, and -1 if there was an error
1964 requiring halting of the search (e.g. memory read error).
1965 If the pattern is found the address is recorded in FOUND_ADDRP. */
1966
1967 int
1968 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
1969 const gdb_byte *pattern, ULONGEST pattern_len,
1970 CORE_ADDR *found_addrp)
1971 {
1972 struct target_ops *t;
1973 int found;
1974
1975 /* We don't use INHERIT to set current_target.to_search_memory,
1976 so we have to scan the target stack and handle targetdebug
1977 ourselves. */
1978
1979 if (targetdebug)
1980 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
1981 hex_string (start_addr));
1982
1983 for (t = current_target.beneath; t != NULL; t = t->beneath)
1984 if (t->to_search_memory != NULL)
1985 break;
1986
1987 if (t != NULL)
1988 {
1989 found = t->to_search_memory (t, start_addr, search_space_len,
1990 pattern, pattern_len, found_addrp);
1991 }
1992 else
1993 {
1994 /* If a special version of to_search_memory isn't available, use the
1995 simple version. */
1996 found = simple_search_memory (&current_target,
1997 start_addr, search_space_len,
1998 pattern, pattern_len, found_addrp);
1999 }
2000
2001 if (targetdebug)
2002 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2003
2004 return found;
2005 }
2006
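/* Usage sketch (illustrative only; not a caller that exists in this file).
   Assuming START, LEN, PATTERN and PATTERN_LEN describe the search, a
   command implementation might do:

     CORE_ADDR found;
     int rc = target_search_memory (start, len, pattern, pattern_len, &found);
     if (rc == 1)
       printf_filtered (_("Pattern found at %s.\n"), hex_string (found));
     else if (rc == 0)
       printf_filtered (_("Pattern not found.\n"));

   A result of -1 means the search was halted; a warning has already been
   printed in that case.  */
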
2007 /* Look through the currently pushed targets. If none of them will
2008 be able to restart the currently running process, issue an error
2009 message. */
2010
2011 void
2012 target_require_runnable (void)
2013 {
2014 struct target_ops *t;
2015
2016 for (t = target_stack; t != NULL; t = t->beneath)
2017 {
2018 /* If this target knows how to create a new program, then
2019 assume we will still be able to after killing the current
2020 one. Either killing and mourning will not pop T, or else
2021 find_default_run_target will find it again. */
2022 if (t->to_create_inferior != NULL)
2023 return;
2024
2025 /* Do not worry about thread_stratum targets that cannot
2026 create inferiors. Assume they will be pushed again if
2027 necessary, and continue to the process_stratum. */
2028 if (t->to_stratum == thread_stratum)
2029 continue;
2030
2031 error (_("\
2032 The \"%s\" target does not support \"run\". Try \"help target\" or \"continue\"."),
2033 t->to_shortname);
2034 }
2035
2036 /* This function is only called if the target is running. In that
2037 case there should have been a process_stratum target and it
2038 should either know how to create inferiors, or not... */
2039 internal_error (__FILE__, __LINE__, "No targets found");
2040 }
2041
2042 /* Look through the list of possible targets for a target that can
2043 execute a run or attach command without any other data. This is
2044 used to locate the default process stratum.
2045
2046 If DO_MESG is not NULL, the result is always valid (error() is
2047 called for errors); else, return NULL on error. */
2048
2049 static struct target_ops *
2050 find_default_run_target (char *do_mesg)
2051 {
2052 struct target_ops **t;
2053 struct target_ops *runable = NULL;
2054 int count;
2055
2056 count = 0;
2057
2058 for (t = target_structs; t < target_structs + target_struct_size;
2059 ++t)
2060 {
2061 if ((*t)->to_can_run && target_can_run (*t))
2062 {
2063 runable = *t;
2064 ++count;
2065 }
2066 }
2067
2068 if (count != 1)
2069 {
2070 if (do_mesg)
2071 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2072 else
2073 return NULL;
2074 }
2075
2076 return runable;
2077 }
2078
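/* Fallback to_attach method, used by the dummy target: find the default
   run target and let it perform the attach.  */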
2079 void
2080 find_default_attach (char *args, int from_tty)
2081 {
2082 struct target_ops *t;
2083
2084 t = find_default_run_target ("attach");
2085 (t->to_attach) (args, from_tty);
2086 return;
2087 }
2088
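/* Fallback to_create_inferior method, used by the dummy target: find the
   default run target and let it start the new inferior.  */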
2089 void
2090 find_default_create_inferior (char *exec_file, char *allargs, char **env,
2091 int from_tty)
2092 {
2093 struct target_ops *t;
2094
2095 t = find_default_run_target ("run");
2096 (t->to_create_inferior) (exec_file, allargs, env, from_tty);
2097 return;
2098 }
2099
2100 int
2101 find_default_can_async_p (void)
2102 {
2103 struct target_ops *t;
2104
2105 /* This may be called before the target is pushed on the stack;
2106 look for the default process stratum. If there's none, gdb isn't
2107 configured with a native debugger, and target remote isn't
2108 connected yet. */
2109 t = find_default_run_target (NULL);
2110 if (t && t->to_can_async_p)
2111 return (t->to_can_async_p) ();
2112 return 0;
2113 }
2114
2115 int
2116 find_default_is_async_p (void)
2117 {
2118 struct target_ops *t;
2119
2120 /* This may be called before the target is pushed on the stack;
2121 look for the default process stratum. If there's none, gdb isn't
2122 configured with a native debugger, and target remote isn't
2123 connected yet. */
2124 t = find_default_run_target (NULL);
2125 if (t && t->to_is_async_p)
2126 return (t->to_is_async_p) ();
2127 return 0;
2128 }
2129
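/* As above, but for non-stop support: ask the default run target, if
   there is one, whether it supports non-stop mode.  */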
2130 int
2131 find_default_supports_non_stop (void)
2132 {
2133 struct target_ops *t;
2134
2135 t = find_default_run_target (NULL);
2136 if (t && t->to_supports_non_stop)
2137 return (t->to_supports_non_stop) ();
2138 return 0;
2139 }
2140
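/* Return nonzero if some target on the current stack supports non-stop
   mode.  */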
2141 int
2142 target_supports_non_stop (void)
2143 {
2144 struct target_ops *t;
2145 for (t = &current_target; t != NULL; t = t->beneath)
2146 if (t->to_supports_non_stop)
2147 return t->to_supports_non_stop ();
2148
2149 return 0;
2150 }
2151
2152
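/* Default implementation of to_region_ok_for_hw_watchpoint: accept any
   region no wider than a pointer.  */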
2153 static int
2154 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
2155 {
2156 return (len <= gdbarch_ptr_bit (target_gdbarch) / TARGET_CHAR_BIT);
2157 }
2158
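/* Default implementation of to_watchpoint_addr_within_range: a plain
   range check against [START, START + LENGTH).  */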
2159 static int
2160 default_watchpoint_addr_within_range (struct target_ops *target,
2161 CORE_ADDR addr,
2162 CORE_ADDR start, int length)
2163 {
2164 return addr >= start && addr < start + length;
2165 }
2166
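/* Trivial helpers, used as default or placeholder methods in the target
   vector; they ignore their arguments and return a constant.  */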
2167 static int
2168 return_zero (void)
2169 {
2170 return 0;
2171 }
2172
2173 static int
2174 return_one (void)
2175 {
2176 return 1;
2177 }
2178
2179 static int
2180 return_minus_one (void)
2181 {
2182 return -1;
2183 }
2184
2185 /*
2186 * Resize the to_sections pointer. Also make sure that anyone that
2187 * was holding on to an old value of it gets updated.
2188 * Returns the old size.
2189 */
2190
2191 int
2192 target_resize_to_sections (struct target_ops *target, int num_added)
2193 {
2194 struct target_ops **t;
2195 struct section_table *old_value;
2196 int old_count;
2197
2198 old_value = target->to_sections;
2199
2200 if (target->to_sections)
2201 {
2202 old_count = target->to_sections_end - target->to_sections;
2203 target->to_sections = (struct section_table *)
2204 xrealloc ((char *) target->to_sections,
2205 (sizeof (struct section_table)) * (num_added + old_count));
2206 }
2207 else
2208 {
2209 old_count = 0;
2210 target->to_sections = (struct section_table *)
2211 xmalloc ((sizeof (struct section_table)) * num_added);
2212 }
2213 target->to_sections_end = target->to_sections + (num_added + old_count);
2214
2215 /* Check to see if anyone else was pointing to this structure.
2216 If old_value was null, then no one was. */
2217
2218 if (old_value)
2219 {
2220 for (t = target_structs; t < target_structs + target_struct_size;
2221 ++t)
2222 {
2223 if ((*t)->to_sections == old_value)
2224 {
2225 (*t)->to_sections = target->to_sections;
2226 (*t)->to_sections_end = target->to_sections_end;
2227 }
2228 }
2229 /* There is a flattened view of the target stack in current_target,
2230 so its to_sections pointer might also need updating. */
2231 if (current_target.to_sections == old_value)
2232 {
2233 current_target.to_sections = target->to_sections;
2234 current_target.to_sections_end = target->to_sections_end;
2235 }
2236 }
2237
2238 return old_count;
2239
2240 }
2241
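/* Usage sketch (hypothetical caller, for illustration only): code that
   appends one section to a target's table would resize first and then
   fill in the new slot, e.g. for some target OPS and BFD section SEC:

     int old_count = target_resize_to_sections (ops, 1);
     struct section_table *new_sect = ops->to_sections + old_count;
     new_sect->bfd = sec->owner;
     new_sect->the_bfd_section = sec;
     new_sect->addr = bfd_section_vma (sec->owner, sec);
     new_sect->endaddr = new_sect->addr + bfd_section_size (sec->owner, sec);
*/
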
2242 /* Remove all target sections taken from ABFD.
2243
2244 Scan the current target stack for targets whose section tables
2245 refer to sections from ABFD, and remove those sections. We use this
2246 when we notice that the inferior has unloaded a shared object, for
2247 example. */
2248 void
2249 remove_target_sections (bfd *abfd)
2250 {
2251 struct target_ops **t;
2252
2253 for (t = target_structs; t < target_structs + target_struct_size; t++)
2254 {
2255 struct section_table *src, *dest;
2256
2257 dest = (*t)->to_sections;
2258 for (src = (*t)->to_sections; src < (*t)->to_sections_end; src++)
2259 if (src->bfd != abfd)
2260 {
2261 /* Keep this section. */
2262 if (dest < src) *dest = *src;
2263 dest++;
2264 }
2265
2266 /* If we've dropped any sections, resize the section table. */
2267 if (dest < src)
2268 target_resize_to_sections (*t, dest - src);
2269 }
2270 }
2271
2272
2273
2274
2275 /* Find a single runnable target in the list of targets and return it.
2276 If for some reason there is more than one, return NULL. */
2277
2278 struct target_ops *
2279 find_run_target (void)
2280 {
2281 struct target_ops **t;
2282 struct target_ops *runable = NULL;
2283 int count;
2284
2285 count = 0;
2286
2287 for (t = target_structs; t < target_structs + target_struct_size; ++t)
2288 {
2289 if ((*t)->to_can_run && target_can_run (*t))
2290 {
2291 runable = *t;
2292 ++count;
2293 }
2294 }
2295
2296 return (count == 1 ? runable : NULL);
2297 }
2298
2299 /* Find a single core_stratum target in the list of targets and return it.
2300 If for some reason there is more than one, return NULL. */
2301
2302 struct target_ops *
2303 find_core_target (void)
2304 {
2305 struct target_ops **t;
2306 struct target_ops *runable = NULL;
2307 int count;
2308
2309 count = 0;
2310
2311 for (t = target_structs; t < target_structs + target_struct_size;
2312 ++t)
2313 {
2314 if ((*t)->to_stratum == core_stratum)
2315 {
2316 runable = *t;
2317 ++count;
2318 }
2319 }
2320
2321 return (count == 1 ? runable : NULL);
2322 }
2323
2324 /*
2325 * Find the next target down the stack from the specified target.
2326 */
2327
2328 struct target_ops *
2329 find_target_beneath (struct target_ops *t)
2330 {
2331 return t->beneath;
2332 }
2333
2334 \f
2335 /* The inferior process has died. Long live the inferior! */
2336
2337 void
2338 generic_mourn_inferior (void)
2339 {
2340 ptid_t ptid;
2341
2342 ptid = inferior_ptid;
2343 inferior_ptid = null_ptid;
2344
2345 if (!ptid_equal (ptid, null_ptid))
2346 {
2347 int pid = ptid_get_pid (ptid);
2348 delete_inferior (pid);
2349 }
2350
2351 breakpoint_init_inferior (inf_exited);
2352 registers_changed ();
2353
2354 reopen_exec_file ();
2355 reinit_frame_cache ();
2356
2357 if (deprecated_detach_hook)
2358 deprecated_detach_hook ();
2359 }
2360 \f
2361 /* Helper function for child_wait and the derivatives of child_wait.
2362 HOSTSTATUS is the waitstatus from wait() or the equivalent; store our
2363 translation of that in OURSTATUS. */
2364 void
2365 store_waitstatus (struct target_waitstatus *ourstatus, int hoststatus)
2366 {
2367 if (WIFEXITED (hoststatus))
2368 {
2369 ourstatus->kind = TARGET_WAITKIND_EXITED;
2370 ourstatus->value.integer = WEXITSTATUS (hoststatus);
2371 }
2372 else if (!WIFSTOPPED (hoststatus))
2373 {
2374 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2375 ourstatus->value.sig = target_signal_from_host (WTERMSIG (hoststatus));
2376 }
2377 else
2378 {
2379 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2380 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (hoststatus));
2381 }
2382 }
2383 \f
2384 /* Returns zero to leave the inferior alone, one to interrupt it. */
2385 int (*target_activity_function) (void);
2386 int target_activity_fd;
2387 \f
2388 /* Convert a normal process ID to a string. Returns the string in a
2389 static buffer. */
2390
2391 char *
2392 normal_pid_to_str (ptid_t ptid)
2393 {
2394 static char buf[32];
2395
2396 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
2397 return buf;
2398 }
2399
2400 /* Error-catcher for target_find_memory_regions */
2401 static int dummy_find_memory_regions (int (*ignore1) (), void *ignore2)
2402 {
2403 error (_("No target."));
2404 return 0;
2405 }
2406
2407 /* Error-catcher for target_make_corefile_notes */
2408 static char * dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
2409 {
2410 error (_("No target."));
2411 return NULL;
2412 }
2413
2414 /* Set up the handful of non-empty slots needed by the dummy target
2415 vector. */
2416
2417 static void
2418 init_dummy_target (void)
2419 {
2420 dummy_target.to_shortname = "None";
2421 dummy_target.to_longname = "None";
2422 dummy_target.to_doc = "";
2423 dummy_target.to_attach = find_default_attach;
2424 dummy_target.to_create_inferior = find_default_create_inferior;
2425 dummy_target.to_can_async_p = find_default_can_async_p;
2426 dummy_target.to_is_async_p = find_default_is_async_p;
2427 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
2428 dummy_target.to_pid_to_str = normal_pid_to_str;
2429 dummy_target.to_stratum = dummy_stratum;
2430 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
2431 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
2432 dummy_target.to_xfer_partial = default_xfer_partial;
2433 dummy_target.to_magic = OPS_MAGIC;
2434 }
2435 \f
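/* Target debugging wrappers.  Each debug_to_* function below forwards to
   the corresponding method of the saved debug_target vector and echoes
   the call (and, where useful, its result) to gdb_stdlog.  */
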
2436 static void
2437 debug_to_open (char *args, int from_tty)
2438 {
2439 debug_target.to_open (args, from_tty);
2440
2441 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
2442 }
2443
2444 static void
2445 debug_to_close (int quitting)
2446 {
2447 target_close (&debug_target, quitting);
2448 fprintf_unfiltered (gdb_stdlog, "target_close (%d)\n", quitting);
2449 }
2450
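/* Close the target TARG.  Prefer the to_xclose variant, which also
   receives the target itself, falling back to plain to_close.  */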
2451 void
2452 target_close (struct target_ops *targ, int quitting)
2453 {
2454 if (targ->to_xclose != NULL)
2455 targ->to_xclose (targ, quitting);
2456 else if (targ->to_close != NULL)
2457 targ->to_close (quitting);
2458 }
2459
2460 static void
2461 debug_to_attach (char *args, int from_tty)
2462 {
2463 debug_target.to_attach (args, from_tty);
2464
2465 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n", args, from_tty);
2466 }
2467
2468
2469 static void
2470 debug_to_post_attach (int pid)
2471 {
2472 debug_target.to_post_attach (pid);
2473
2474 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
2475 }
2476
2477 static void
2478 debug_to_detach (char *args, int from_tty)
2479 {
2480 debug_target.to_detach (args, from_tty);
2481
2482 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n", args, from_tty);
2483 }
2484
2485 static void
2486 debug_to_resume (ptid_t ptid, int step, enum target_signal siggnal)
2487 {
2488 debug_target.to_resume (ptid, step, siggnal);
2489
2490 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n", PIDGET (ptid),
2491 step ? "step" : "continue",
2492 target_signal_to_name (siggnal));
2493 }
2494
2495 static ptid_t
2496 debug_to_wait (ptid_t ptid, struct target_waitstatus *status)
2497 {
2498 ptid_t retval;
2499
2500 retval = debug_target.to_wait (ptid, status);
2501
2502 fprintf_unfiltered (gdb_stdlog,
2503 "target_wait (%d, status) = %d, ", PIDGET (ptid),
2504 PIDGET (retval));
2505 fprintf_unfiltered (gdb_stdlog, "status->kind = ");
2506 switch (status->kind)
2507 {
2508 case TARGET_WAITKIND_EXITED:
2509 fprintf_unfiltered (gdb_stdlog, "exited, status = %d\n",
2510 status->value.integer);
2511 break;
2512 case TARGET_WAITKIND_STOPPED:
2513 fprintf_unfiltered (gdb_stdlog, "stopped, signal = %s\n",
2514 target_signal_to_name (status->value.sig));
2515 break;
2516 case TARGET_WAITKIND_SIGNALLED:
2517 fprintf_unfiltered (gdb_stdlog, "signalled, signal = %s\n",
2518 target_signal_to_name (status->value.sig));
2519 break;
2520 case TARGET_WAITKIND_LOADED:
2521 fprintf_unfiltered (gdb_stdlog, "loaded\n");
2522 break;
2523 case TARGET_WAITKIND_FORKED:
2524 fprintf_unfiltered (gdb_stdlog, "forked\n");
2525 break;
2526 case TARGET_WAITKIND_VFORKED:
2527 fprintf_unfiltered (gdb_stdlog, "vforked\n");
2528 break;
2529 case TARGET_WAITKIND_EXECD:
2530 fprintf_unfiltered (gdb_stdlog, "execd\n");
2531 break;
2532 case TARGET_WAITKIND_SPURIOUS:
2533 fprintf_unfiltered (gdb_stdlog, "spurious\n");
2534 break;
2535 default:
2536 fprintf_unfiltered (gdb_stdlog, "unknown???\n");
2537 break;
2538 }
2539
2540 return retval;
2541 }
2542
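/* Print to gdb_stdlog the name (or number) and raw contents of register
   REGNO in REGCACHE, on behalf of the wrapper FUNC.  */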
2543 static void
2544 debug_print_register (const char * func,
2545 struct regcache *regcache, int regno)
2546 {
2547 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2548 fprintf_unfiltered (gdb_stdlog, "%s ", func);
2549 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
2550 && gdbarch_register_name (gdbarch, regno) != NULL
2551 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
2552 fprintf_unfiltered (gdb_stdlog, "(%s)",
2553 gdbarch_register_name (gdbarch, regno));
2554 else
2555 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
2556 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
2557 {
2558 int i, size = register_size (gdbarch, regno);
2559 unsigned char buf[MAX_REGISTER_SIZE];
2560 regcache_raw_collect (regcache, regno, buf);
2561 fprintf_unfiltered (gdb_stdlog, " = ");
2562 for (i = 0; i < size; i++)
2563 {
2564 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
2565 }
2566 if (size <= sizeof (LONGEST))
2567 {
2568 ULONGEST val = extract_unsigned_integer (buf, size);
2569 fprintf_unfiltered (gdb_stdlog, " %s %s",
2570 core_addr_to_string_nz (val), plongest (val));
2571 }
2572 }
2573 fprintf_unfiltered (gdb_stdlog, "\n");
2574 }
2575
2576 static void
2577 debug_to_fetch_registers (struct regcache *regcache, int regno)
2578 {
2579 debug_target.to_fetch_registers (regcache, regno);
2580 debug_print_register ("target_fetch_registers", regcache, regno);
2581 }
2582
2583 static void
2584 debug_to_store_registers (struct regcache *regcache, int regno)
2585 {
2586 debug_target.to_store_registers (regcache, regno);
2587 debug_print_register ("target_store_registers", regcache, regno);
2588 fprintf_unfiltered (gdb_stdlog, "\n");
2589 }
2590
2591 static void
2592 debug_to_prepare_to_store (struct regcache *regcache)
2593 {
2594 debug_target.to_prepare_to_store (regcache);
2595
2596 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
2597 }
2598
2599 static int
2600 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
2601 int write, struct mem_attrib *attrib,
2602 struct target_ops *target)
2603 {
2604 int retval;
2605
2606 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
2607 attrib, target);
2608
2609 fprintf_unfiltered (gdb_stdlog,
2610 "target_xfer_memory (0x%x, xxx, %d, %s, xxx) = %d",
2611 (unsigned int) memaddr, /* possibly truncates a long long */
2612 len, write ? "write" : "read", retval);
2613
2614 if (retval > 0)
2615 {
2616 int i;
2617
2618 fputs_unfiltered (", bytes =", gdb_stdlog);
2619 for (i = 0; i < retval; i++)
2620 {
2621 if ((((long) &(myaddr[i])) & 0xf) == 0)
2622 {
2623 if (targetdebug < 2 && i > 0)
2624 {
2625 fprintf_unfiltered (gdb_stdlog, " ...");
2626 break;
2627 }
2628 fprintf_unfiltered (gdb_stdlog, "\n");
2629 }
2630
2631 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
2632 }
2633 }
2634
2635 fputc_unfiltered ('\n', gdb_stdlog);
2636
2637 return retval;
2638 }
2639
2640 static void
2641 debug_to_files_info (struct target_ops *target)
2642 {
2643 debug_target.to_files_info (target);
2644
2645 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
2646 }
2647
2648 static int
2649 debug_to_insert_breakpoint (struct bp_target_info *bp_tgt)
2650 {
2651 int retval;
2652
2653 retval = debug_target.to_insert_breakpoint (bp_tgt);
2654
2655 fprintf_unfiltered (gdb_stdlog,
2656 "target_insert_breakpoint (0x%lx, xxx) = %ld\n",
2657 (unsigned long) bp_tgt->placed_address,
2658 (unsigned long) retval);
2659 return retval;
2660 }
2661
2662 static int
2663 debug_to_remove_breakpoint (struct bp_target_info *bp_tgt)
2664 {
2665 int retval;
2666
2667 retval = debug_target.to_remove_breakpoint (bp_tgt);
2668
2669 fprintf_unfiltered (gdb_stdlog,
2670 "target_remove_breakpoint (0x%lx, xxx) = %ld\n",
2671 (unsigned long) bp_tgt->placed_address,
2672 (unsigned long) retval);
2673 return retval;
2674 }
2675
2676 static int
2677 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
2678 {
2679 int retval;
2680
2681 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
2682
2683 fprintf_unfiltered (gdb_stdlog,
2684 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
2685 (unsigned long) type,
2686 (unsigned long) cnt,
2687 (unsigned long) from_tty,
2688 (unsigned long) retval);
2689 return retval;
2690 }
2691
2692 static int
2693 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
2694 {
2695 int retval;
2696
2697 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
2698
2699 fprintf_unfiltered (gdb_stdlog,
2700 "TARGET_REGION_OK_FOR_HW_WATCHPOINT (%ld, %ld) = 0x%lx\n",
2701 (unsigned long) addr,
2702 (unsigned long) len,
2703 (unsigned long) retval);
2704 return retval;
2705 }
2706
2707 static int
2708 debug_to_stopped_by_watchpoint (void)
2709 {
2710 int retval;
2711
2712 retval = debug_target.to_stopped_by_watchpoint ();
2713
2714 fprintf_unfiltered (gdb_stdlog,
2715 "STOPPED_BY_WATCHPOINT () = %ld\n",
2716 (unsigned long) retval);
2717 return retval;
2718 }
2719
2720 static int
2721 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
2722 {
2723 int retval;
2724
2725 retval = debug_target.to_stopped_data_address (target, addr);
2726
2727 fprintf_unfiltered (gdb_stdlog,
2728 "target_stopped_data_address ([0x%lx]) = %ld\n",
2729 (unsigned long)*addr,
2730 (unsigned long)retval);
2731 return retval;
2732 }
2733
2734 static int
2735 debug_to_watchpoint_addr_within_range (struct target_ops *target,
2736 CORE_ADDR addr,
2737 CORE_ADDR start, int length)
2738 {
2739 int retval;
2740
2741 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
2742 start, length);
2743
2744 fprintf_unfiltered (gdb_stdlog,
2745 "target_watchpoint_addr_within_range (0x%lx, 0x%lx, %d) = %d\n",
2746 (unsigned long) addr, (unsigned long) start, length,
2747 retval);
2748 return retval;
2749 }
2750
2751 static int
2752 debug_to_insert_hw_breakpoint (struct bp_target_info *bp_tgt)
2753 {
2754 int retval;
2755
2756 retval = debug_target.to_insert_hw_breakpoint (bp_tgt);
2757
2758 fprintf_unfiltered (gdb_stdlog,
2759 "target_insert_hw_breakpoint (0x%lx, xxx) = %ld\n",
2760 (unsigned long) bp_tgt->placed_address,
2761 (unsigned long) retval);
2762 return retval;
2763 }
2764
2765 static int
2766 debug_to_remove_hw_breakpoint (struct bp_target_info *bp_tgt)
2767 {
2768 int retval;
2769
2770 retval = debug_target.to_remove_hw_breakpoint (bp_tgt);
2771
2772 fprintf_unfiltered (gdb_stdlog,
2773 "target_remove_hw_breakpoint (0x%lx, xxx) = %ld\n",
2774 (unsigned long) bp_tgt->placed_address,
2775 (unsigned long) retval);
2776 return retval;
2777 }
2778
2779 static int
2780 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type)
2781 {
2782 int retval;
2783
2784 retval = debug_target.to_insert_watchpoint (addr, len, type);
2785
2786 fprintf_unfiltered (gdb_stdlog,
2787 "target_insert_watchpoint (0x%lx, %d, %d) = %ld\n",
2788 (unsigned long) addr, len, type, (unsigned long) retval);
2789 return retval;
2790 }
2791
2792 static int
2793 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type)
2794 {
2795 int retval;
2796
2797 retval = debug_target.to_remove_watchpoint (addr, len, type);
2798
2799 fprintf_unfiltered (gdb_stdlog,
2800 "target_remove_watchpoint (0x%lx, %d, %d) = %ld\n",
2801 (unsigned long) addr, len, type, (unsigned long) retval);
2802 return retval;
2803 }
2804
2805 static void
2806 debug_to_terminal_init (void)
2807 {
2808 debug_target.to_terminal_init ();
2809
2810 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
2811 }
2812
2813 static void
2814 debug_to_terminal_inferior (void)
2815 {
2816 debug_target.to_terminal_inferior ();
2817
2818 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
2819 }
2820
2821 static void
2822 debug_to_terminal_ours_for_output (void)
2823 {
2824 debug_target.to_terminal_ours_for_output ();
2825
2826 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
2827 }
2828
2829 static void
2830 debug_to_terminal_ours (void)
2831 {
2832 debug_target.to_terminal_ours ();
2833
2834 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
2835 }
2836
2837 static void
2838 debug_to_terminal_save_ours (void)
2839 {
2840 debug_target.to_terminal_save_ours ();
2841
2842 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
2843 }
2844
2845 static void
2846 debug_to_terminal_info (char *arg, int from_tty)
2847 {
2848 debug_target.to_terminal_info (arg, from_tty);
2849
2850 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
2851 from_tty);
2852 }
2853
2854 static void
2855 debug_to_kill (void)
2856 {
2857 debug_target.to_kill ();
2858
2859 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
2860 }
2861
2862 static void
2863 debug_to_load (char *args, int from_tty)
2864 {
2865 debug_target.to_load (args, from_tty);
2866
2867 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
2868 }
2869
2870 static int
2871 debug_to_lookup_symbol (char *name, CORE_ADDR *addrp)
2872 {
2873 int retval;
2874
2875 retval = debug_target.to_lookup_symbol (name, addrp);
2876
2877 fprintf_unfiltered (gdb_stdlog, "target_lookup_symbol (%s, xxx)\n", name);
2878
2879 return retval;
2880 }
2881
2882 static void
2883 debug_to_create_inferior (char *exec_file, char *args, char **env,
2884 int from_tty)
2885 {
2886 debug_target.to_create_inferior (exec_file, args, env, from_tty);
2887
2888 fprintf_unfiltered (gdb_stdlog, "target_create_inferior (%s, %s, xxx, %d)\n",
2889 exec_file, args, from_tty);
2890 }
2891
2892 static void
2893 debug_to_post_startup_inferior (ptid_t ptid)
2894 {
2895 debug_target.to_post_startup_inferior (ptid);
2896
2897 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
2898 PIDGET (ptid));
2899 }
2900
2901 static void
2902 debug_to_acknowledge_created_inferior (int pid)
2903 {
2904 debug_target.to_acknowledge_created_inferior (pid);
2905
2906 fprintf_unfiltered (gdb_stdlog, "target_acknowledge_created_inferior (%d)\n",
2907 pid);
2908 }
2909
2910 static void
2911 debug_to_insert_fork_catchpoint (int pid)
2912 {
2913 debug_target.to_insert_fork_catchpoint (pid);
2914
2915 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d)\n",
2916 pid);
2917 }
2918
2919 static int
2920 debug_to_remove_fork_catchpoint (int pid)
2921 {
2922 int retval;
2923
2924 retval = debug_target.to_remove_fork_catchpoint (pid);
2925
2926 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
2927 pid, retval);
2928
2929 return retval;
2930 }
2931
2932 static void
2933 debug_to_insert_vfork_catchpoint (int pid)
2934 {
2935 debug_target.to_insert_vfork_catchpoint (pid);
2936
2937 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d)\n",
2938 pid);
2939 }
2940
2941 static int
2942 debug_to_remove_vfork_catchpoint (int pid)
2943 {
2944 int retval;
2945
2946 retval = debug_target.to_remove_vfork_catchpoint (pid);
2947
2948 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
2949 pid, retval);
2950
2951 return retval;
2952 }
2953
2954 static void
2955 debug_to_insert_exec_catchpoint (int pid)
2956 {
2957 debug_target.to_insert_exec_catchpoint (pid);
2958
2959 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d)\n",
2960 pid);
2961 }
2962
2963 static int
2964 debug_to_remove_exec_catchpoint (int pid)
2965 {
2966 int retval;
2967
2968 retval = debug_target.to_remove_exec_catchpoint (pid);
2969
2970 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
2971 pid, retval);
2972
2973 return retval;
2974 }
2975
2976 static int
2977 debug_to_has_exited (int pid, int wait_status, int *exit_status)
2978 {
2979 int has_exited;
2980
2981 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
2982
2983 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
2984 pid, wait_status, *exit_status, has_exited);
2985
2986 return has_exited;
2987 }
2988
2989 static void
2990 debug_to_mourn_inferior (void)
2991 {
2992 debug_target.to_mourn_inferior ();
2993
2994 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2995 }
2996
2997 static int
2998 debug_to_can_run (void)
2999 {
3000 int retval;
3001
3002 retval = debug_target.to_can_run ();
3003
3004 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
3005
3006 return retval;
3007 }
3008
3009 static void
3010 debug_to_notice_signals (ptid_t ptid)
3011 {
3012 debug_target.to_notice_signals (ptid);
3013
3014 fprintf_unfiltered (gdb_stdlog, "target_notice_signals (%d)\n",
3015 PIDGET (ptid));
3016 }
3017
3018 static int
3019 debug_to_thread_alive (ptid_t ptid)
3020 {
3021 int retval;
3022
3023 retval = debug_target.to_thread_alive (ptid);
3024
3025 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3026 PIDGET (ptid), retval);
3027
3028 return retval;
3029 }
3030
3031 static void
3032 debug_to_find_new_threads (void)
3033 {
3034 debug_target.to_find_new_threads ();
3035
3036 fputs_unfiltered ("target_find_new_threads ()\n", gdb_stdlog);
3037 }
3038
3039 static void
3040 debug_to_stop (ptid_t ptid)
3041 {
3042 debug_target.to_stop (ptid);
3043
3044 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
3045 target_pid_to_str (ptid));
3046 }
3047
3048 static void
3049 debug_to_rcmd (char *command,
3050 struct ui_file *outbuf)
3051 {
3052 debug_target.to_rcmd (command, outbuf);
3053 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
3054 }
3055
3056 static char *
3057 debug_to_pid_to_exec_file (int pid)
3058 {
3059 char *exec_file;
3060
3061 exec_file = debug_target.to_pid_to_exec_file (pid);
3062
3063 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
3064 pid, exec_file);
3065
3066 return exec_file;
3067 }
3068
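/* Install the debug_to_* wrappers in current_target, keeping a copy of
   the original vector in debug_target so the wrappers can forward to it.  */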
3069 static void
3070 setup_target_debug (void)
3071 {
3072 memcpy (&debug_target, &current_target, sizeof debug_target);
3073
3074 current_target.to_open = debug_to_open;
3075 current_target.to_close = debug_to_close;
3076 current_target.to_attach = debug_to_attach;
3077 current_target.to_post_attach = debug_to_post_attach;
3078 current_target.to_detach = debug_to_detach;
3079 current_target.to_resume = debug_to_resume;
3080 current_target.to_wait = debug_to_wait;
3081 current_target.to_fetch_registers = debug_to_fetch_registers;
3082 current_target.to_store_registers = debug_to_store_registers;
3083 current_target.to_prepare_to_store = debug_to_prepare_to_store;
3084 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
3085 current_target.to_files_info = debug_to_files_info;
3086 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
3087 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
3088 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
3089 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
3090 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
3091 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
3092 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
3093 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
3094 current_target.to_stopped_data_address = debug_to_stopped_data_address;
3095 current_target.to_watchpoint_addr_within_range = debug_to_watchpoint_addr_within_range;
3096 current_target.to_region_ok_for_hw_watchpoint = debug_to_region_ok_for_hw_watchpoint;
3097 current_target.to_terminal_init = debug_to_terminal_init;
3098 current_target.to_terminal_inferior = debug_to_terminal_inferior;
3099 current_target.to_terminal_ours_for_output = debug_to_terminal_ours_for_output;
3100 current_target.to_terminal_ours = debug_to_terminal_ours;
3101 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
3102 current_target.to_terminal_info = debug_to_terminal_info;
3103 current_target.to_kill = debug_to_kill;
3104 current_target.to_load = debug_to_load;
3105 current_target.to_lookup_symbol = debug_to_lookup_symbol;
3106 current_target.to_create_inferior = debug_to_create_inferior;
3107 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
3108 current_target.to_acknowledge_created_inferior = debug_to_acknowledge_created_inferior;
3109 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
3110 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
3111 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
3112 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
3113 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
3114 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
3115 current_target.to_has_exited = debug_to_has_exited;
3116 current_target.to_mourn_inferior = debug_to_mourn_inferior;
3117 current_target.to_can_run = debug_to_can_run;
3118 current_target.to_notice_signals = debug_to_notice_signals;
3119 current_target.to_thread_alive = debug_to_thread_alive;
3120 current_target.to_find_new_threads = debug_to_find_new_threads;
3121 current_target.to_stop = debug_to_stop;
3122 current_target.to_rcmd = debug_to_rcmd;
3123 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
3124 }
3125 \f
3126
3127 static char targ_desc[] =
3128 "Names of targets and files being debugged.\n\
3129 Shows the entire stack of targets currently in use (including the exec-file,\n\
3130 core-file, and process, if any), as well as the symbol file name.";
3131
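/* Implement the "monitor" command: forward CMD to the target's to_rcmd
   method, erroring out if no target provides a real implementation.  */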
3132 static void
3133 do_monitor_command (char *cmd,
3134 int from_tty)
3135 {
3136 if ((current_target.to_rcmd
3137 == (void (*) (char *, struct ui_file *)) tcomplain)
3138 || (current_target.to_rcmd == debug_to_rcmd
3139 && (debug_target.to_rcmd
3140 == (void (*) (char *, struct ui_file *)) tcomplain)))
3141 error (_("\"monitor\" command not supported by this target."));
3142 target_rcmd (cmd, gdb_stdtarg);
3143 }
3144
3145 /* Print the name of each layer of our target stack. */
3146
3147 static void
3148 maintenance_print_target_stack (char *cmd, int from_tty)
3149 {
3150 struct target_ops *t;
3151
3152 printf_filtered (_("The current target stack is:\n"));
3153
3154 for (t = target_stack; t != NULL; t = t->beneath)
3155 {
3156 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
3157 }
3158 }
3159
3160 /* Controls whether async mode is permitted. */
3161 int target_async_permitted = 0;
3162
3163 /* The set command writes to this variable. If the inferior is
3164 executing, target_async_permitted is *not* updated. */
3165 static int target_async_permitted_1 = 0;
3166
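/* The "set target-async" command callback.  Refuse to change the setting
   while the inferior is executing; otherwise publish the new value in
   target_async_permitted.  */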
3167 static void
3168 set_maintenance_target_async_permitted (char *args, int from_tty,
3169 struct cmd_list_element *c)
3170 {
3171 if (target_has_execution)
3172 {
3173 target_async_permitted_1 = target_async_permitted;
3174 error (_("Cannot change this setting while the inferior is running."));
3175 }
3176
3177 target_async_permitted = target_async_permitted_1;
3178 }
3179
3180 static void
3181 show_maintenance_target_async_permitted (struct ui_file *file, int from_tty,
3182 struct cmd_list_element *c,
3183 const char *value)
3184 {
3185 fprintf_filtered (file, _("\
3186 Controlling the inferior in asynchronous mode is %s.\n"), value);
3187 }
3188
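/* Module initialization: install the dummy target and register the
   target-related commands and settings.  */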
3189 void
3190 initialize_targets (void)
3191 {
3192 init_dummy_target ();
3193 push_target (&dummy_target);
3194
3195 add_info ("target", target_info, targ_desc);
3196 add_info ("files", target_info, targ_desc);
3197
3198 add_setshow_zinteger_cmd ("target", class_maintenance, &targetdebug, _("\
3199 Set target debugging."), _("\
3200 Show target debugging."), _("\
3201 When non-zero, target debugging is enabled. Higher numbers are more\n\
3202 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
3203 command."),
3204 NULL,
3205 show_targetdebug,
3206 &setdebuglist, &showdebuglist);
3207
3208 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
3209 &trust_readonly, _("\
3210 Set mode for reading from readonly sections."), _("\
3211 Show mode for reading from readonly sections."), _("\
3212 When this mode is on, memory reads from readonly sections (such as .text)\n\
3213 will be read from the object file instead of from the target. This will\n\
3214 result in significant performance improvement for remote targets."),
3215 NULL,
3216 show_trust_readonly,
3217 &setlist, &showlist);
3218
3219 add_com ("monitor", class_obscure, do_monitor_command,
3220 _("Send a command to the remote monitor (remote targets only)."));
3221
3222 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
3223 _("Print the name of each layer of the internal target stack."),
3224 &maintenanceprintlist);
3225
3226 add_setshow_boolean_cmd ("target-async", no_class,
3227 &target_async_permitted_1, _("\
3228 Set whether gdb controls the inferior in asynchronous mode."), _("\
3229 Show whether gdb controls the inferior in asynchronous mode."), _("\
3230 Tells gdb whether to control the inferior in asynchronous mode."),
3231 set_maintenance_target_async_permitted,
3232 show_maintenance_target_async_permitted,
3233 &setlist,
3234 &showlist);
3235
3236 target_dcache = dcache_init ();
3237 }